# NOTE(review): the three lines below were pasted commit metadata, not Python
# (bare text like this is a SyntaxError). Kept as a comment so the module parses.
# "Add CodeWeaver Gradio app" — commit 515f392
from typing import Any, Dict, List, Optional, Literal, Tuple, Annotated
from operator import add
from pydantic import BaseModel, Field
from langchain_core.messages import BaseMessage
from langgraph.graph import add_messages
# Sentinel values understood by the reducers below: when a reducer's `new`
# list begins with its sentinel, the accumulated state is discarded and
# replaced by the remainder of `new` (a per-run/per-turn reset that works
# despite LangGraph checkpointing). The string values are a wire protocol
# between nodes and reducers — do not change them.
_STEPS_RESET_TOKEN = "__RESET_STEPS__"
_MULTI_ANS_RESET_TOKEN = "__RESET_MULTI_ANS__"
def merge_intermediate_steps(old: List[str], new: List[str]) -> List[str]:
    """Reducer for the ``intermediate_steps`` state channel.

    Normal merge: concatenates ``old`` and ``new``, which lets parallel
    nodes append steps concurrently. Reset merge: when the first element
    of ``new`` is ``_STEPS_RESET_TOKEN``, the accumulated ``old`` steps
    are discarded and ``new[1:]`` becomes the value — used to re-scope
    checkpointed step logs to the current run.
    """
    if not new:
        return old
    head, *tail = new
    if head == _STEPS_RESET_TOKEN:
        return tail
    return [*old, *new]
def merge_multi_answers(old: List[Dict[str, Any]], new: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Reducer for the ``multi_answers`` state channel.

    Normal merge: concatenates ``old`` and ``new`` so parallel workers can
    append their answers at the same time. Reset merge: when ``new`` starts
    with the marker dict ``{"__token__": _MULTI_ANS_RESET_TOKEN}``, the
    accumulated ``old`` answers are dropped and ``new[1:]`` replaces them
    (prevents answers from a previous turn from piling up when the thread
    and its checkpoint are reused).
    """
    if not new:
        return old
    first = new[0]
    wants_reset = isinstance(first, dict) and first.get("__token__") == _MULTI_ANS_RESET_TOKEN
    return new[1:] if wants_reset else old + new
class SearchResult(BaseModel):
    """Single search-result record shared by all search domains.

    NOTE: the ``description=`` strings below are runtime Field metadata
    (surfaced via the model schema) and are intentionally left in Korean.
    """
    # Origin of the hit, e.g. Stack Overflow, official docs, GitHub Issues.
    source: str = Field(
        ...,
        description="๊ฒ€์ƒ‰ ์ถœ์ฒ˜ (์˜ˆ: Stack Overflow, ๊ณต์‹ ๋ฌธ์„œ, GitHub Issues ๋“ฑ)",
    )
    # Core excerpt / content text of the search result.
    content: str = Field(
        ...,
        description="๊ฒ€์ƒ‰ ๊ฒฐ๊ณผ์˜ ํ•ต์‹ฌ ๋‚ด์šฉ ๋˜๋Š” ๋ฐœ์ทŒ ํ…์ŠคํŠธ",
    )
    # Source URL; set only when one exists.
    url: Optional[str] = Field(
        default=None,
        description="๊ฒ€์ƒ‰ ๊ฒฐ๊ณผ์˜ ์›๋ณธ ์ถœ์ฒ˜ URL (์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ์—๋งŒ ์„ค์ •)",
    )
    # Relevance to the query in [0.0, 1.0]; higher means more relevant.
    relevance_score: Optional[float] = Field(
        default=None,
        description="๊ฒ€์ƒ‰ ์ฟผ๋ฆฌ์™€์˜ ๊ด€๋ จ๋„ ์ ์ˆ˜ (0.0โ€“1.0 ๋ฒ”์œ„, ํด์ˆ˜๋ก ๋” ๊ด€๋ จ ์žˆ์Œ)",
    )
class AgentState(BaseModel):
    """Pydantic model holding the complete state of the CodeWeaver LangGraph agent.

    LangGraph guidelines followed here:
    - Use a Pydantic BaseModel (type safety)
    - Attach the add_messages reducer to the ``messages`` field
    - Provide a default value for every field

    NOTE: ``description=`` strings are runtime Field metadata and are left
    in Korean on purpose.
    """
    # Core fields
    user_question: str = Field(default="", description="์‚ฌ์šฉ์ž์˜ ์›๋ณธ ์งˆ๋ฌธ")
    # Conversation history, merged via LangGraph's add_messages reducer.
    messages: Annotated[List[BaseMessage], add_messages] = Field(
        default_factory=list,
        description="๋Œ€ํ™” ๋ฉ”์‹œ์ง€ ํžˆ์Šคํ† ๋ฆฌ (add_messages reducer ์‚ฌ์šฉ)"
    )
    # Legacy conversation history (kept for compatibility; ``messages`` takes priority)
    conversation_history: Optional[List[Tuple[str, str]]] = Field(
        default=None,
        description="๋ ˆ๊ฑฐ์‹œ ๋Œ€ํ™” ๋‚ด์—ญ (messages ์šฐ์„  ์‚ฌ์šฉ)"
    )
    # Intent classification result (closed set enforced by Literal).
    detected_intent: Optional[Literal["debugging", "learning", "code_review"]] = Field(
        default=None,
        description="๋ถ„๋ฅ˜๋œ ์งˆ๋ฌธ ์˜๋„"
    )
    # Cache-related: answer looked up from the vector-DB cache, if any.
    cached_result: Optional[str] = Field(
        default=None,
        description="๋ฒกํ„ฐ DB ์บ์‹œ์—์„œ ์กฐํšŒ๋œ ๋‹ต๋ณ€"
    )
    # Search results (list concatenation via operator.add so Send API fan-out merges).
    search_results: Annotated[List[SearchResult], add] = Field(
        default_factory=list,
        description="๋ณ‘๋ ฌ ๊ฒ€์ƒ‰์œผ๋กœ ์ˆ˜์ง‘๋œ ๊ฒฐ๊ณผ ๋ฆฌ์ŠคํŠธ (Send API๋กœ ๋ณ‘๋ ฌ ์—…๋ฐ์ดํŠธ)"
    )
    # Intermediate processing: per-subtask result storage.
    subtask_results: Dict[str, Any] = Field(
        default_factory=dict,
        description="์„œ๋ธŒํƒœ์Šคํฌ ์‹คํ–‰ ๊ฒฐ๊ณผ ์ €์žฅ์†Œ"
    )
    # Final output
    final_answer: Optional[str] = Field(
        default=None,
        description="์ตœ์ข… ์ƒ์„ฑ๋œ ๋‹ต๋ณ€"
    )
    # Debugging/tracing (reducer allows parallel appends plus a per-run reset).
    intermediate_steps: Annotated[List[str], merge_intermediate_steps] = Field(
        default_factory=list,
        description="์‹คํ–‰ ๋‹จ๊ณ„๋ณ„ ๋กœ๊ทธ (๋ณ‘๋ ฌ ๋…ธ๋“œ์—์„œ ๋™์‹œ ์—…๋ฐ์ดํŠธ ๊ฐ€๋Šฅ)"
    )
    # Question analysis & cache eligibility
    question_type: Optional[Literal["clarification", "new_topic", "independent"]] = Field(
        default=None,
        description="์งˆ๋ฌธ ์œ ํ˜• ๋ถ„๋ฅ˜ ๊ฒฐ๊ณผ"
    )
    # Free-text rationale emitted by the question-analysis step.
    analysis_reasoning: Optional[str] = Field(
        default=None,
        description="์งˆ๋ฌธ ๋ถ„์„ ์ด์œ "
    )
    # Whether the final answer should be written back to the cache.
    should_cache: Optional[bool] = Field(
        default=None,
        description="์บ์‹œ ์ €์žฅ ์—ฌ๋ถ€"
    )
    # Normalized form of the question, used as the cache key.
    canonical_question: Optional[str] = Field(
        default=None,
        description="์ •๊ทœํ™”๋œ ์งˆ๋ฌธ (์บ์‹œ์šฉ)"
    )
    # Planning & Refinement (Phase 3: Open Deep Research pattern)
    plan: Optional[Dict[str, Any]] = Field(
        default=None,
        description="์งˆ๋ฌธ ๋ถ„ํ•ด ๊ณ„ํš: {'sub_questions': [...], 'reasoning': '...'}"
    )
    # True when search results were too thin and the query needs refining.
    needs_refinement: bool = Field(
        default=False,
        description="๊ฒ€์ƒ‰ ๊ฒฐ๊ณผ๊ฐ€ ๋ถ€์กฑํ•˜์—ฌ ์ฟผ๋ฆฌ ๊ฐœ์„  ํ•„์š” ์—ฌ๋ถ€"
    )
    # Refinement attempts so far (description says the cap is 1 attempt).
    refinement_count: int = Field(
        default=0,
        description="๊ฒ€์ƒ‰ ์ฟผ๋ฆฌ ๊ฐœ์„  ์‹œ๋„ ํšŸ์ˆ˜ (์ตœ๋Œ€ 1ํšŒ)"
    )
    # Question as it was before refinement, referenced for the final answer.
    original_question: Optional[str] = Field(
        default=None,
        description="์ฟผ๋ฆฌ ๊ฐœ์„  ์ „ ์›๋ณธ ์งˆ๋ฌธ (์ตœ์ข… ๋‹ต๋ณ€ ์ƒ์„ฑ ์‹œ ์ฐธ์กฐ)"
    )
    # Phase 4: Dynamic Parallel Search for Multiple Questions
    is_multi_question: bool = Field(
        default=False,
        description="ํ˜„์žฌ ๋‹ค์ค‘ ์งˆ๋ฌธ ์ฒ˜๋ฆฌ ์ค‘์ธ์ง€ ์—ฌ๋ถ€"
    )
    # Zero-based index of the sub-question currently being processed.
    sub_question_index: int = Field(
        default=0,
        description="์„œ๋ธŒ ์งˆ๋ฌธ ์ธ๋ฑ์Šค (0๋ถ€ํ„ฐ ์‹œ์ž‘)"
    )
    # Text of the sub-question currently being processed.
    sub_question_text: Optional[str] = Field(
        default=None,
        description="ํ˜„์žฌ ์ฒ˜๋ฆฌ ์ค‘์ธ ์„œ๋ธŒ ์งˆ๋ฌธ ํ…์ŠคํŠธ"
    )
    # Original compound question, referenced when composing the merged answer.
    original_multi_question: Optional[str] = Field(
        default=None,
        description="๋‹ค์ค‘ ์งˆ๋ฌธ์˜ ์›๋ณธ ์งˆ๋ฌธ (ํ†ตํ•ฉ ๋‹ต๋ณ€ ์ƒ์„ฑ ์‹œ ์ฐธ์กฐ)"
    )
    # Per-sub-question answers; reducer allows parallel appends and a
    # marker-dict reset per turn.
    multi_answers: Annotated[List[Dict[str, Any]], merge_multi_answers] = Field(
        default_factory=list,
        description="๋‹ค์ค‘ ์งˆ๋ฌธ์˜ ๊ฐ ๋‹ต๋ณ€ ๋ฆฌ์ŠคํŠธ (reducer๋กœ ์ž๋™ ๋ณ‘ํ•ฉ)"
    )

    class Config:
        # Required so non-Pydantic types (e.g. BaseMessage) may appear in fields.
        arbitrary_types_allowed = True