capstone-backend / llm_service.py
dongchan21's picture
Upload 2 files
bb56df9 verified
import os
import re
from typing import Dict, List, Optional

from openai import OpenAI
USE_MOCK = os.getenv("USE_MOCK_LLM", "0") == "1"
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
_client = None
# Lazy initialization of OpenAI client
def _client_lazy():
global _client
if _client is None:
_client = OpenAI() # ν™˜κ²½λ³€μˆ˜ OPENAI_API_KEY ν•„μš”
return _client
# Mock functions for testing without actual LLM calls
def _mock_questions(user_message: str, user_profile: Dict, restrict_topics: Optional[List[str]] = None) -> List[str]:
# μ‚¬μš©μž λ©”μ‹œμ§€ 기반으둜 λ‹€μ–‘ν•œ mock 질문 생성
if "λŒ€μΆœ" in user_message:
return ["λŒ€μΆœ κΈˆλ¦¬λŠ” μ–΄λ–»κ²Œ κ²°μ •λ˜λ‚˜μš”?", "μ‹ μš©λŒ€μΆœκ³Ό λ‹΄λ³΄λŒ€μΆœμ˜ μ°¨μ΄λŠ”?", "λŒ€μΆœ ν•œλ„λŠ” μ–΄λ–»κ²Œ μ‚°μ •λ˜λ‚˜μš”?"]
elif "νŽ€λ“œ" in user_message or "투자" in user_message:
return ["νŽ€λ“œμ™€ 주식 투자의 μ°¨μ΄λŠ”?", "μ•ˆμ •μ μΈ νŽ€λ“œ μƒν’ˆ μΆ”μ²œν•΄μ£Όμ„Έμš”", "νŽ€λ“œ 수읡λ₯ μ€ μ–΄λ–»κ²Œ ν™•μΈν•˜λ‚˜μš”?"]
else:
return [
"예금과 적금의 차이가 λ­μ˜ˆμš”?",
"단기 저좕에 더 μœ λ¦¬ν•œ μƒν’ˆμ€ λ¬΄μ—‡μΈκ°€μš”?",
"적금이 μ˜ˆκΈˆλ³΄λ‹€ μ΄μžκ°€ 항상 λ†’μ€κ°€μš”?",
"예금/적금 쀑도해지 μ‹œ 뢈이읡이 μžˆλ‚˜μš”?",
"금리 비ꡐ μ‹œ μ–΄λ–€ 기쀀을 봐야 ν•˜λ‚˜μš”?"
]
return base if 'base' in locals() else []
def _mock_answer(question: str, context: str) -> str:
return f"λͺ¨μ˜μ‘λ‹΅: 질문 \"{question}\"에 λŒ€ν•΄ λ¬Έμ„œ λ‚΄μš©μ„ λ°”νƒ•μœΌλ‘œ μš”μ•½ν•˜λ©΄ β€” {context[:120]}..."
def generate_questions_from_context(user_message: str, user_profile: Dict, similar_docs: List[Dict]) -> List[str]:
"""벑터 DBμ—μ„œ κ²€μƒ‰λœ λ¬Έμ„œ λ‚΄μš©μ„ 기반으둜 질문 생성"""
if USE_MOCK:
return _mock_questions(user_message, user_profile, None)
# λ¬Έμ„œ λ‚΄μš© μΆ”μΆœ 및 ν¬λ§·νŒ…
context_parts = []
for i, doc in enumerate(similar_docs[:5], 1):
doc_content = doc.get('content', '') or doc.get('text', '')
doc_meta = doc.get('meta', {})
context_parts.append(f"[λ¬Έμ„œ {i}] {doc_meta}\nλ‚΄μš©: {doc_content[:200]}...")
context = "\n\n".join(context_parts)
prompt = f"""
당신은 금육 μ±—λ΄‡μ˜ '질문 μΆ”μ²œ' μ—­ν• μž…λ‹ˆλ‹€.
μ‚¬μš©μžμ˜ 질문과 κ΄€λ ¨λœ λ¬Έμ„œ λ‚΄μš©μ„ μ°Έκ³ ν•˜μ—¬, μ‚¬μš©μžκ°€ μΆ”κ°€λ‘œ κΆκΈˆν•΄ν•  λ§Œν•œ 질문 5개λ₯Ό μ œμ•ˆν•˜μ„Έμš”.
**μ€‘μš”**: μ•„λž˜ κ²€μƒ‰λœ λ¬Έμ„œ λ‚΄μš©μ— κΈ°λ°˜ν•˜μ—¬ λ‹΅λ³€ κ°€λŠ₯ν•œ 질문만 μƒμ„±ν•˜μ„Έμš”.
μ§ˆλ¬Έμ€ μ§§κ³  ν΄λ¦­ν•˜κΈ° μ‰½κ²Œ λ§Œλ“œμ„Έμš”.
μ‚¬μš©μž 질문: "{user_message}"
μ‚¬μš©μž ν”„λ‘œν•„: {user_profile}
[κ²€μƒ‰λœ κ΄€λ ¨ λ¬Έμ„œ]
{context}
좜λ ₯은 각 μ§ˆλ¬Έμ„ ν•œ 쀄씩 λ‚˜μ—΄λ§Œ ν•˜μ„Έμš”.
"""
client = _client_lazy()
resp = client.chat.completions.create(
model=OPENAI_MODEL,
messages=[{"role": "user", "content": prompt}],
temperature=0.3,
)
content = resp.choices[0].message.content.strip()
lines = [l.strip("- ").strip() for l in content.split("\n") if l.strip()]
return lines[:5]
def generate_questions(user_message: str, user_profile: Dict, restrict_topics: Optional[List[str]] = None) -> List[str]:
if USE_MOCK:
return _mock_questions(user_message, user_profile, restrict_topics)
topics_line = f"\n참고 주제: {', '.join(restrict_topics)}" if restrict_topics else ""
prompt = f"""
당신은 금육 μ±—λ΄‡μ˜ '질문 μΆ”μ²œ' μ—­ν• μž…λ‹ˆλ‹€.
μ‚¬μš©μž μž…λ ₯κ³Ό ν”„λ‘œν•„μ„ μ°Έκ³ ν•˜μ—¬ 금육 κ΄€λ ¨(예금/적금, λŒ€μΆœ, νŽ€λ“œ, λ³΄ν—˜, μΉ΄λ“œ λ“±) 후속 질문 5개λ₯Ό μ œμ•ˆν•˜μ„Έμš”.
μ‚¬μš©μžμ˜ 질문 μ˜λ„λ₯Ό μ •ν™•νžˆ νŒŒμ•…ν•˜μ—¬ κ΄€λ ¨μ„± 높은 μ§ˆλ¬Έμ„ μƒμ„±ν•˜μ„Έμš”.
μ§ˆλ¬Έμ€ μ§§κ³  ν΄λ¦­ν•˜κΈ° μ‰½κ²Œ λ§Œλ“œμ„Έμš”.
μ‚¬μš©μž μž…λ ₯: "{user_message}"
μ‚¬μš©μž ν”„λ‘œν•„: {user_profile}{topics_line}
좜λ ₯은 각 μ§ˆλ¬Έμ„ ν•œ 쀄씩 λ‚˜μ—΄λ§Œ ν•˜μ„Έμš”.
"""
client = _client_lazy()
resp = client.chat.completions.create(
model=OPENAI_MODEL,
messages=[{"role": "user", "content": prompt}],
temperature=0.3,
)
content = resp.choices[0].message.content.strip()
lines = [l.strip("- ").strip() for l in content.split("\n") if l.strip()]
return lines[:5]
def generate_answer(question: str, context: str, user_profile: Optional[Dict] = None) -> str:
if USE_MOCK:
return _mock_answer(question, context)
profile_text = f"\n[μ‚¬μš©μž ν”„λ‘œν•„]\n{user_profile}\n" if user_profile else ""
prompt = f"""
λ‹€μŒ λ¬Έμ„œ λ‚΄μš©μ„ λ°”νƒ•μœΌλ‘œ μ‚¬μš©μžμ˜ μ§ˆλ¬Έμ— μ •ν™•ν•˜κ³  κ°„κ²°ν•˜κ²Œ λ‹΅ν•˜μ„Έμš”.
금육 μƒν’ˆ(예금/적금, λŒ€μΆœ, νŽ€λ“œ, λ³΄ν—˜, μΉ΄λ“œ λ“±)의 νŠΉμ§•, 쑰건, 금리, ν˜œνƒ 등을 λͺ…ν™•νžˆ μ„€λͺ…ν•˜μ„Έμš”.
μ‚¬μš©μž ν”„λ‘œν•„μ΄ 제곡된 경우, ν•΄λ‹Ή 정보λ₯Ό κ³ λ €ν•˜μ—¬ λ§žμΆ€ν˜• 닡변을 μ œκ³΅ν•˜μ„Έμš”.
[λ¬Έμ„œ]
{context}
{profile_text}
[질문]
{question}
"""
client = _client_lazy()
resp = client.chat.completions.create(
model=OPENAI_MODEL,
messages=[{"role": "user", "content": prompt}],
temperature=0.2,
)
return resp.choices[0].message.content.strip()