# Snapshot header (Hugging Face file-viewer residue): user kyle8581, commit 2b267d0, 5.51 kB.
from openai import OpenAI
from dotenv import load_dotenv
import yaml
import os
import json
import re
# Load environment variables from a local .env file (if present) so the
# API key below can be read from the process environment.
load_dotenv()
# Module-level OpenAI client shared by every request in this module.
# NOTE(review): assumes OPENAI_API_KEY is set; os.getenv returns None
# otherwise and the client will only fail at request time — confirm env.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Load the prompt template from prompt.yaml next to this file; fall back to a
# built-in template when the file (or the YAML loader) is unavailable.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompt = yaml.safe_load(file)["prompt"]
except Exception as e:
    print(f"Warning: prompt.yaml λ‘œλ“œ μ‹€νŒ¨. κΈ°λ³Έ ν”„λ‘¬ν”„νŠΈλ₯Ό μ‚¬μš©ν•©λ‹ˆλ‹€. 였λ₯˜: {e}")
    # BUGFIX: the previous fallback used {guide}/{user_experiences} placeholders
    # and contained unescaped literal braces in a JSON example, so the
    # prompt.format(question=..., jd=..., company_name=..., experience_level=...,
    # conversation=...) call in generate_answer_flow() raised KeyError whenever
    # the fallback was active. The fallback now exposes exactly the placeholders
    # that call supplies, and requests a markdown table to match the
    # parse_markdown_table_from_response() post-processing step.
    prompt = """
You are an expert resume consultant. Based on the question, company, job description, and the applicant's conversation below, create a logical and persuasive story flow for a cover letter answer.

### Question
{question}

### Company
{company_name}

### Job Description
{jd}

### Experience Level
{experience_level}

### Conversation
{conversation}

Please generate a step-by-step answer flow as a markdown table with columns "Step" and "Content".
"""
def parse_markdown_table_from_response(text: str) -> str | None:
"""
LLM μ‘λ‹΅μ—μ„œ markdown table을 μΆ”μΆœν•©λ‹ˆλ‹€.
Args:
text (str): LLM이 λ°˜ν™˜ν•œ 전체 ν…μŠ€νŠΈ 응닡.
Returns:
str | None: μΆ”μΆœλœ markdown table λ¬Έμžμ—΄, λ˜λŠ” μ‹€νŒ¨ μ‹œ None.
"""
if not text:
return None
# ```markdown ... ``` ν˜•μ‹μ˜ μ½”λ“œ λΈ”λ‘μ—μ„œ table μΆ”μΆœ
markdown_match = re.search(r"```markdown\s*([\s\S]*?)\s*```", text)
if markdown_match:
table_content = markdown_match.group(1).strip()
return table_content
# μ½”λ“œ 블둝이 μ—†λ‹€λ©΄, 전체 ν…μŠ€νŠΈμ—μ„œ table μ°ΎκΈ°
lines = text.strip().split('\n')
table_lines = []
for line in lines:
line = line.strip()
if line.startswith('|') and line.endswith('|'):
table_lines.append(line)
if len(table_lines) >= 3: # header, separator, at least one row
return '\n'.join(table_lines)
return None
def parse_json_from_response(text: str) -> dict | None:
"""
Markdown μ½”λ“œ 블둝 μ•ˆμ— 포함될 수 μžˆλŠ” JSON λ¬Έμžμ—΄μ„ μΆ”μΆœν•˜κ³  νŒŒμ‹±ν•©λ‹ˆλ‹€.
(이전 λ²„μ „κ³Όμ˜ ν˜Έν™˜μ„±μ„ μœ„ν•΄ μœ μ§€)
Args:
text (str): LLM이 λ°˜ν™˜ν•œ 전체 ν…μŠ€νŠΈ 응닡.
Returns:
dict | None: νŒŒμ‹±λœ λ”•μ…”λ„ˆλ¦¬ 객체, λ˜λŠ” μ‹€νŒ¨ μ‹œ None.
"""
if not text:
return None
# ```json ... ``` λ˜λŠ” ``` ... ``` ν˜•μ‹μ˜ μ½”λ“œ λΈ”λ‘μ—μ„œ JSON μΆ”μΆœ
match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", text)
if match:
json_str = match.group(1)
else:
# μ½”λ“œ 블둝이 μ—†λ‹€λ©΄, 전체 ν…μŠ€νŠΈλ₯Ό JSON으둜 κ°€μ •
json_str = text
try:
return json.loads(json_str)
except json.JSONDecodeError:
# 전체 νŒŒμ‹±μ΄ μ‹€νŒ¨ν•˜λ©΄, 첫 '{'와 λ§ˆμ§€λ§‰ '}'λ₯Ό κΈ°μ€€μœΌλ‘œ λ‹€μ‹œ μ‹œλ„
start_index = json_str.find('{')
end_index = json_str.rfind('}')
if start_index != -1 and end_index != -1 and start_index < end_index:
potential_json = json_str[start_index:end_index+1]
try:
return json.loads(potential_json)
except json.JSONDecodeError:
pass # μ΄λ§ˆμ €λ„ μ‹€νŒ¨ν•˜λ©΄ κ·Έλƒ₯ None λ°˜ν™˜
return None
def generate_answer_flow(question, jd, company_name, experience_level, conversation):
    """Generate an answer flow (outline) for a cover letter question.

    Fills the module-level prompt template, sends it to the chat model, and
    parses the markdown table out of the reply.

    Returns:
        tuple: (parsed_content, response) where parsed_content is
        {"flow": <table>} on success or {"error": <message>} on failure, and
        response is the raw API response object (None when the call failed).
    """
    try:
        filled_prompt = prompt.format(
            question=question,
            jd=jd,
            company_name=company_name,
            experience_level=experience_level,
            conversation=conversation,
        )
        # The reply is expected as a markdown table, not JSON.
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": filled_prompt}],
        )
        # Parse the markdown table out of the model's reply.
        table = parse_markdown_table_from_response(response.choices[0].message.content)
        if table:
            return {"flow": table}, response
        return {"error": "Failed to parse markdown table"}, response
    except Exception as e:
        print(f"λ‹΅λ³€ 흐름 생성 λ˜λŠ” νŒŒμ‹± 쀑 였λ₯˜ λ°œμƒ: {e}")
        return {"error": f"Failed to generate or parse flow: {str(e)}"}, None
if __name__ == "__main__":
    # Ad-hoc smoke test: run the generator once against a hard-coded example
    # and pretty-print the parsed result (the raw response is discarded).
    parsed_flow, _ = generate_answer_flow(
        question="μ‚Όμ„±μ „μžλ₯Ό μ§€μ›ν•œ μ΄μœ μ™€ μž…μ‚¬ ν›„ 이루고 싢은 κΏˆμ„ κΈ°μˆ ν•˜μ‹œμ˜€.",
        jd="μ‚Όμ„±μ „μžλŠ” 세계적인 기술 κΈ°μ—…μœΌλ‘œ, λ‹€μ–‘ν•œ λΆ„μ•Όμ—μ„œ 선도적인 κΈ°μˆ μ„ κ°œλ°œν•˜κ³  μžˆμŠ΅λ‹ˆλ‹€. ν”„λ‘ νŠΈμ—”λ“œ 개발자둜 μž…μ‚¬ν•˜λ©΄ λ‹€μ–‘ν•œ ν”„λ‘œμ νŠΈμ— μ°Έμ—¬ν•˜λ©°, μ΅œμ‹  κΈ°μˆ μ„ μ μš©ν•˜μ—¬ μ‚¬μš©μž κ²½ν—˜μ„ κ°œμ„ ν•  수 μžˆμŠ΅λ‹ˆλ‹€.",
        company_name="μ‚Όμ„±μ „μž",
        experience_level="μ‹ μž…",
        conversation="User: μ•ˆλ…•ν•˜μ„Έμš”, μ‚Όμ„±μ „μž ν”„λ‘ νŠΈμ—”λ“œ 개발 직무에 μ§€μ›ν•˜λ €κ³  ν•©λ‹ˆλ‹€.\nAI: λ„€, μ–΄λ–€ 점이 κΆκΈˆν•˜μ‹ κ°€μš”?\nUser: μžκΈ°μ†Œκ°œμ„œμ— μ–΄λ–€ κ²½ν—˜μ„ κ°•μ‘°ν•˜λ©΄ μ’‹μ„κΉŒμš”?\nAI: ν”„λ‘œμ νŠΈ κ²½ν—˜μ—μ„œ μ‚¬μš©ν•œ 기술 μŠ€νƒκ³Ό μ„±κ³Όλ₯Ό ꡬ체적으둜 μž‘μ„±ν•˜λŠ” 것이 μ’‹μŠ΅λ‹ˆλ‹€.",
    )
    print(json.dumps(parsed_flow, indent=2, ensure_ascii=False))