# NOTE(review): extraction artifact removed here (file-size line, commit hash,
# and a flat line-number index) — it was not part of the module source and
# broke the Python syntax of the file.
from openai import OpenAI
from dotenv import load_dotenv
import yaml
import os
import json
import re
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Load the prompt template from prompt.yaml located next to this module.
# On any failure fall back to a built-in template so the module stays usable.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompt = yaml.safe_load(file)["prompt"]
except Exception as e:
    print(f"Warning: prompt.yaml λ‘λ μ€ν¨. κΈ°λ³Έ ν둬ννΈλ₯Ό μ¬μ©ν©λλ€. μ€λ₯: {e}")
    # BUG FIX: the previous fallback template used {guide}/{user_experiences}
    # placeholders and un-escaped JSON braces, but generate_answer_flow() fills
    # the template via str.format with the keys question / jd / company_name /
    # experience_level / conversation — so .format() raised KeyError whenever
    # prompt.yaml was missing. Placeholders now match the caller and literal
    # braces in the JSON example are doubled so str.format leaves them intact.
    prompt = """
You are an expert resume consultant. Based on the question, job description,
and conversation below, create a logical and persuasive story flow for a
cover letter answer.

### Question
{question}

### Job Description ({company_name}, target level: {experience_level})
{jd}

### Conversation
{conversation}

Please generate a step-by-step answer flow in JSON format, like
{{"answer_flow": ["Step 1:...", "Step 2:...", "Step 3:...", "Step 4:..."]}}.
"""
def parse_markdown_table_from_response(text: str) -> str | None:
"""
LLM μλ΅μμ markdown tableμ μΆμΆν©λλ€.
Args:
text (str): LLMμ΄ λ°νν μ 체 ν
μ€νΈ μλ΅.
Returns:
str | None: μΆμΆλ markdown table λ¬Έμμ΄, λλ μ€ν¨ μ None.
"""
if not text:
return None
# ```markdown ... ``` νμμ μ½λ λΈλ‘μμ table μΆμΆ
markdown_match = re.search(r"```markdown\s*([\s\S]*?)\s*```", text)
if markdown_match:
table_content = markdown_match.group(1).strip()
return table_content
# μ½λ λΈλ‘μ΄ μλ€λ©΄, μ 체 ν
μ€νΈμμ table μ°ΎκΈ°
lines = text.strip().split('\n')
table_lines = []
for line in lines:
line = line.strip()
if line.startswith('|') and line.endswith('|'):
table_lines.append(line)
if len(table_lines) >= 3: # header, separator, at least one row
return '\n'.join(table_lines)
return None
def parse_json_from_response(text: str) -> dict | None:
"""
Markdown μ½λ λΈλ‘ μμ ν¬ν¨λ μ μλ JSON λ¬Έμμ΄μ μΆμΆνκ³ νμ±ν©λλ€.
(μ΄μ λ²μ κ³Όμ νΈνμ±μ μν΄ μ μ§)
Args:
text (str): LLMμ΄ λ°νν μ 체 ν
μ€νΈ μλ΅.
Returns:
dict | None: νμ±λ λμ
λ리 κ°μ²΄, λλ μ€ν¨ μ None.
"""
if not text:
return None
# ```json ... ``` λλ ``` ... ``` νμμ μ½λ λΈλ‘μμ JSON μΆμΆ
match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", text)
if match:
json_str = match.group(1)
else:
# μ½λ λΈλ‘μ΄ μλ€λ©΄, μ 체 ν
μ€νΈλ₯Ό JSONμΌλ‘ κ°μ
json_str = text
try:
return json.loads(json_str)
except json.JSONDecodeError:
# μ 체 νμ±μ΄ μ€ν¨νλ©΄, 첫 '{'μ λ§μ§λ§ '}'λ₯Ό κΈ°μ€μΌλ‘ λ€μ μλ
start_index = json_str.find('{')
end_index = json_str.rfind('}')
if start_index != -1 and end_index != -1 and start_index < end_index:
potential_json = json_str[start_index:end_index+1]
try:
return json.loads(potential_json)
except json.JSONDecodeError:
pass # μ΄λ§μ λ μ€ν¨νλ©΄ κ·Έλ₯ None λ°ν
return None
def generate_answer_flow(question, jd, company_name, experience_level, conversation):
    """Generate a cover-letter answer flow (outline) via the LLM.

    Renders the module-level prompt template with the given inputs, calls the
    chat-completions API, and extracts a markdown table from the reply.

    Returns:
        tuple: (parsed content, raw API response). Parsed content is
        {"flow": <table str>} on success or {"error": <message>} on failure;
        the raw response is None when the API call itself raised.
    """
    try:
        rendered_prompt = prompt.format(
            question=question,
            jd=jd,
            company_name=company_name,
            experience_level=experience_level,
            conversation=conversation,
        )
        # The model is expected to reply with a markdown table, not JSON,
        # so no response_format constraint is passed.
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": rendered_prompt}],
        )
        table = parse_markdown_table_from_response(response.choices[0].message.content)
        parsed = {"flow": table} if table else {"error": "Failed to parse markdown table"}
        return parsed, response
    except Exception as e:
        print(f"λ΅λ³ νλ¦ μμ± λλ νμ± μ€ μ€λ₯ λ°μ: {e}")
        return {"error": f"Failed to generate or parse flow: {str(e)}"}, None
if __name__ == "__main__":
    # Demo invocation with a sample question / JD / conversation.
    # NOTE(review): the Korean string literals below are mojibake from an
    # encoding accident, and the extraction also split several of them across
    # lines mid-character; they have been rejoined here, but the originals
    # should be restored from version control.
    example_input = {
        "question": "μΌμ±μ μλ₯Ό μ§μν μ΄μ μ μμ¬ ν μ΄λ£¨κ³ μΆμ κΏμ κΈ°μ νμμ€.",
        "jd": "μΌμ±μ μλ μΈκ³μ μΈ κΈ°μ κΈ°μμΌλ‘, λ€μν λΆμΌμμ μ λμ μΈ κΈ°μ μ κ°λ°νκ³ μμ΅λλ€. νλ‘ νΈμλ κ°λ°μλ‘ μμ¬νλ©΄ λ€μν νλ‘μ νΈμ μ°Έμ¬νλ©°, μ΅μ κΈ°μ μ μ μ©νμ¬ μ¬μ©μ κ²½νμ κ°μ ν μ μμ΅λλ€.",
        "company_name": "μΌμ±μ μ",
        "experience_level": "μ μ",
        "conversation": "User: μλνμΈμ, μΌμ±μ μ νλ‘ νΈμλ κ°λ° μ§λ¬΄μ μ§μνλ €κ³ ν©λλ€.\nAI: λ€, μ΄λ€ μ μ΄ κΆκΈνμ κ°μ?\nUser: μκΈ°μκ°μμ μ΄λ€ κ²½νμ κ°μ‘°νλ©΄ μ’μκΉμ?\nAI: νλ‘μ νΈ κ²½νμμ μ¬μ©ν κΈ°μ μ€νκ³Ό μ±κ³Όλ₯Ό ꡬ체μ μΌλ‘ μμ±νλ κ²μ΄ μ’μ΅λλ€."
    }
    flow_json, _ = generate_answer_flow(**example_input)
    print(json.dumps(flow_json, indent=2, ensure_ascii=False))
# NOTE(review): stray trailing '|' (extraction artifact) removed.