Spaces:
Runtime error
Runtime error
import os
import re
from typing import List, Dict, Optional

from openai import OpenAI
# When USE_MOCK_LLM=1, every public function returns canned responses
# instead of calling the OpenAI API (useful for tests / offline dev).
USE_MOCK = os.getenv("USE_MOCK_LLM", "0") == "1"
# Chat model used for all completion calls; overridable via env var.
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
# Shared OpenAI client, created lazily on first use (see _client_lazy).
_client = None
| # Lazy initialization of OpenAI client | |
def _client_lazy():
    """Return the shared OpenAI client, constructing it on first call.

    Constructing OpenAI() requires the OPENAI_API_KEY environment
    variable (per the original comment).
    """
    global _client
    if _client is not None:
        return _client
    _client = OpenAI()
    return _client
| # Mock functions for testing without actual LLM calls | |
| def _mock_questions(user_message: str, user_profile: Dict, restrict_topics: Optional[List[str]] = None) -> List[str]: | |
| # μ¬μ©μ λ©μμ§ κΈ°λ°μΌλ‘ λ€μν mock μ§λ¬Έ μμ± | |
| if "λμΆ" in user_message: | |
| return ["λμΆ κΈλ¦¬λ μ΄λ»κ² κ²°μ λλμ?", "μ μ©λμΆκ³Ό λ΄λ³΄λμΆμ μ°¨μ΄λ?", "λμΆ νλλ μ΄λ»κ² μ°μ λλμ?"] | |
| elif "νλ" in user_message or "ν¬μ" in user_message: | |
| return ["νλμ μ£Όμ ν¬μμ μ°¨μ΄λ?", "μμ μ μΈ νλ μν μΆμ²ν΄μ£ΌμΈμ", "νλ μμ΅λ₯ μ μ΄λ»κ² νμΈνλμ?"] | |
| else: | |
| return [ | |
| "μκΈκ³Ό μ κΈμ μ°¨μ΄κ° λμμ?", | |
| "λ¨κΈ° μ μΆμ λ μ 리ν μνμ 무μμΈκ°μ?", | |
| "μ κΈμ΄ μκΈλ³΄λ€ μ΄μκ° νμ λμκ°μ?", | |
| "μκΈ/μ κΈ μ€λν΄μ§ μ λΆμ΄μ΅μ΄ μλμ?", | |
| "κΈλ¦¬ λΉκ΅ μ μ΄λ€ κΈ°μ€μ λ΄μΌ νλμ?" | |
| ] | |
| return base if 'base' in locals() else [] | |
| def _mock_answer(question: str, context: str) -> str: | |
| return f"λͺ¨μμλ΅: μ§λ¬Έ \"{question}\"μ λν΄ λ¬Έμ λ΄μ©μ λ°νμΌλ‘ μμ½νλ©΄ β {context[:120]}..." | |
def generate_questions_from_context(user_message: str, user_profile: Dict, similar_docs: List[Dict]) -> List[str]:
    """Generate follow-up questions grounded in documents retrieved from the vector DB.

    Args:
        user_message: The user's latest message.
        user_profile: Profile dict, interpolated into the prompt as-is.
        similar_docs: Retrieved docs; each may carry 'content' (or 'text') and 'meta'.

    Returns:
        Up to 5 suggested question strings (may be fewer if the model returns less).
    """
    if USE_MOCK:
        # Mock mode ignores the retrieved documents entirely.
        return _mock_questions(user_message, user_profile, None)
    # Build a compact context from at most 5 docs, each body truncated to 200 chars.
    context_parts = []
    for i, doc in enumerate(similar_docs[:5], 1):
        doc_content = doc.get('content', '') or doc.get('text', '')
        doc_meta = doc.get('meta', {})
        context_parts.append(f"[λ¬Έμ {i}] {doc_meta}\nλ΄μ©: {doc_content[:200]}...")
    context = "\n\n".join(context_parts)
    prompt = f"""
λΉμ μ κΈμ΅ μ±λ΄μ 'μ§λ¬Έ μΆμ²' μν μ λλ€.
μ¬μ©μμ μ§λ¬Έκ³Ό κ΄λ ¨λ λ¬Έμ λ΄μ©μ μ°Έκ³ νμ¬, μ¬μ©μκ° μΆκ°λ‘ κΆκΈν΄ν λ§ν μ§λ¬Έ 5κ°λ₯Ό μ μνμΈμ.
**μ€μ**: μλ κ²μλ λ¬Έμ λ΄μ©μ κΈ°λ°νμ¬ λ΅λ³ κ°λ₯ν μ§λ¬Έλ§ μμ±νμΈμ.
μ§λ¬Έμ μ§§κ³ ν΄λ¦νκΈ° μ½κ² λ§λμΈμ.
μ¬μ©μ μ§λ¬Έ: "{user_message}"
μ¬μ©μ νλ‘ν: {user_profile}
[κ²μλ κ΄λ ¨ λ¬Έμ]
{context}
μΆλ ₯μ κ° μ§λ¬Έμ ν μ€μ© λμ΄λ§ νμΈμ.
"""
    client = _client_lazy()
    resp = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,
    )
    # message.content can be None (e.g. refusals); guard before str ops
    # (the original crashed with AttributeError in that case).
    content = (resp.choices[0].message.content or "").strip()
    # Strip list markers chat models commonly prepend: "-", "*", "1.", "2)".
    # The original strip("- ") left numbered prefixes in place.
    lines = [re.sub(r"^\s*(?:\d+[.)]|[-*])\s*", "", l).strip() for l in content.split("\n") if l.strip()]
    return [q for q in lines if q][:5]
def generate_questions(user_message: str, user_profile: Dict, restrict_topics: Optional[List[str]] = None) -> List[str]:
    """Generate follow-up finance questions from the user's message and profile.

    Args:
        user_message: The user's latest message.
        user_profile: Profile dict, interpolated into the prompt as-is.
        restrict_topics: Optional topic hints appended to the prompt.

    Returns:
        Up to 5 suggested question strings (may be fewer if the model returns less).
    """
    if USE_MOCK:
        return _mock_questions(user_message, user_profile, restrict_topics)
    topics_line = f"\nμ°Έκ³ μ£Όμ : {', '.join(restrict_topics)}" if restrict_topics else ""
    prompt = f"""
λΉμ μ κΈμ΅ μ±λ΄μ 'μ§λ¬Έ μΆμ²' μν μ λλ€.
μ¬μ©μ μ λ ₯κ³Ό νλ‘νμ μ°Έκ³ νμ¬ κΈμ΅ κ΄λ ¨(μκΈ/μ κΈ, λμΆ, νλ, 보ν, μΉ΄λ λ±) νμ μ§λ¬Έ 5κ°λ₯Ό μ μνμΈμ.
μ¬μ©μμ μ§λ¬Έ μλλ₯Ό μ νν νμ νμ¬ κ΄λ ¨μ± λμ μ§λ¬Έμ μμ±νμΈμ.
μ§λ¬Έμ μ§§κ³ ν΄λ¦νκΈ° μ½κ² λ§λμΈμ.
μ¬μ©μ μ λ ₯: "{user_message}"
μ¬μ©μ νλ‘ν: {user_profile}{topics_line}
μΆλ ₯μ κ° μ§λ¬Έμ ν μ€μ© λμ΄λ§ νμΈμ.
"""
    client = _client_lazy()
    resp = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,
    )
    # message.content can be None (e.g. refusals); guard before str ops
    # (the original crashed with AttributeError in that case).
    content = (resp.choices[0].message.content or "").strip()
    # Strip list markers chat models commonly prepend: "-", "*", "1.", "2)".
    # Kept identical to generate_questions_from_context's parsing for consistency.
    lines = [re.sub(r"^\s*(?:\d+[.)]|[-*])\s*", "", l).strip() for l in content.split("\n") if l.strip()]
    return [q for q in lines if q][:5]
def generate_answer(question: str, context: str, user_profile: Optional[Dict] = None) -> str:
    """Answer a question from the given document context via the LLM (or mock).

    Args:
        question: The user's question.
        context: Retrieved document text the answer must be grounded in.
        user_profile: Optional profile dict for personalized answers.

    Returns:
        The model's answer text (empty string if the model returned no content).
    """
    if USE_MOCK:
        return _mock_answer(question, context)
    profile_text = f"\n[μ¬μ©μ νλ‘ν]\n{user_profile}\n" if user_profile else ""
    prompt = f"""
λ€μ λ¬Έμ λ΄μ©μ λ°νμΌλ‘ μ¬μ©μμ μ§λ¬Έμ μ ννκ³ κ°κ²°νκ² λ΅νμΈμ.
κΈμ΅ μν(μκΈ/μ κΈ, λμΆ, νλ, 보ν, μΉ΄λ λ±)μ νΉμ§, 쑰건, κΈλ¦¬, νν λ±μ λͺ νν μ€λͺ νμΈμ.
μ¬μ©μ νλ‘νμ΄ μ 곡λ κ²½μ°, ν΄λΉ μ 보λ₯Ό κ³ λ €νμ¬ λ§μΆ€ν λ΅λ³μ μ 곡νμΈμ.
[λ¬Έμ]
{context}
{profile_text}
[μ§λ¬Έ]
{question}
"""
    client = _client_lazy()
    resp = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,  # lower temperature than question generation: factual answering
    )
    # message.content can be None (e.g. refusals); the original's
    # `.content.strip()` raised AttributeError in that case.
    return (resp.choices[0].message.content or "").strip()