|
|
from openai import OpenAI |
|
|
from dotenv import load_dotenv |
|
|
import yaml |
|
|
import os |
|
|
import json |
|
|
import re |
|
|
|
|
|
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Module-level OpenAI client shared by all completion calls below.
# NOTE(review): if OPENAI_API_KEY is unset, getenv returns None and the
# first API call will fail — verify the deployment always provides it.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
|
|
|
|
|
|
|
# Load the LLM prompt template from prompt.yaml located next to this file.
# On any failure (missing file, bad YAML, missing "prompt" key) fall back to
# a built-in template so the module stays importable.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompt = yaml.safe_load(file)["prompt"]
except Exception as e:
    print(f"Warning: prompt.yaml λ‘λ μ€ν¨. κΈ°λ³Έ ν둬ννΈλ₯Ό μ¬μ©ν©λλ€. μ€λ₯: {e}")
    # BUGFIX: the previous fallback used {guide}/{user_experiences}
    # placeholders and an unescaped JSON example, so any call to
    # prompt.format(question=..., jd=..., company_name=...,
    # experience_level=..., conversation=...) raised KeyError.  The
    # placeholders now match generate_answer_flow()'s format kwargs, and
    # literal braces in the JSON example are doubled for str.format().
    prompt = """
You are an expert resume consultant. Based on the provided question, job description, and the user's experiences, create a logical and persuasive story flow for a cover letter answer.

### Question
{question}

### Job Description
{jd}

### Company
{company_name}

### Experience Level
{experience_level}

### Conversation with the User
{conversation}

Please generate a step-by-step answer flow in JSON format, like {{"answer_flow": ["Step 1:...", "Step 2:...", "Step 3:...", "Step 4:..."]}}.
"""
|
|
|
|
|
def parse_markdown_table_from_response(text: str) -> str | None: |
|
|
""" |
|
|
LLM μλ΅μμ markdown tableμ μΆμΆν©λλ€. |
|
|
|
|
|
Args: |
|
|
text (str): LLMμ΄ λ°νν μ 체 ν
μ€νΈ μλ΅. |
|
|
|
|
|
Returns: |
|
|
str | None: μΆμΆλ markdown table λ¬Έμμ΄, λλ μ€ν¨ μ None. |
|
|
""" |
|
|
if not text: |
|
|
return None |
|
|
|
|
|
|
|
|
markdown_match = re.search(r"```markdown\s*([\s\S]*?)\s*```", text) |
|
|
if markdown_match: |
|
|
table_content = markdown_match.group(1).strip() |
|
|
return table_content |
|
|
|
|
|
|
|
|
lines = text.strip().split('\n') |
|
|
table_lines = [] |
|
|
|
|
|
for line in lines: |
|
|
line = line.strip() |
|
|
if line.startswith('|') and line.endswith('|'): |
|
|
table_lines.append(line) |
|
|
|
|
|
if len(table_lines) >= 3: |
|
|
return '\n'.join(table_lines) |
|
|
|
|
|
return None |
|
|
|
|
|
def parse_json_from_response(text: str) -> dict | None: |
|
|
""" |
|
|
Markdown μ½λ λΈλ‘ μμ ν¬ν¨λ μ μλ JSON λ¬Έμμ΄μ μΆμΆνκ³ νμ±ν©λλ€. |
|
|
(μ΄μ λ²μ κ³Όμ νΈνμ±μ μν΄ μ μ§) |
|
|
|
|
|
Args: |
|
|
text (str): LLMμ΄ λ°νν μ 체 ν
μ€νΈ μλ΅. |
|
|
|
|
|
Returns: |
|
|
dict | None: νμ±λ λμ
λ리 κ°μ²΄, λλ μ€ν¨ μ None. |
|
|
""" |
|
|
if not text: |
|
|
return None |
|
|
|
|
|
|
|
|
match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", text) |
|
|
if match: |
|
|
json_str = match.group(1) |
|
|
else: |
|
|
|
|
|
json_str = text |
|
|
|
|
|
try: |
|
|
return json.loads(json_str) |
|
|
except json.JSONDecodeError: |
|
|
|
|
|
start_index = json_str.find('{') |
|
|
end_index = json_str.rfind('}') |
|
|
if start_index != -1 and end_index != -1 and start_index < end_index: |
|
|
potential_json = json_str[start_index:end_index+1] |
|
|
try: |
|
|
return json.loads(potential_json) |
|
|
except json.JSONDecodeError: |
|
|
pass |
|
|
|
|
|
return None |
|
|
|
|
|
def generate_answer_flow(question, jd, company_name, experience_level, conversation):
    """Generate an answer flow (outline) for a cover-letter question.

    Fills the module-level prompt template with the given context, sends
    it to the chat model, and extracts a markdown table from the reply.

    Args:
        question: The cover-letter question being answered.
        jd: The job description text.
        company_name: The target company's name.
        experience_level: The applicant's experience level.
        conversation: The prior user/AI conversation transcript.

    Returns:
        A tuple ``(parsed_content, response)``: ``parsed_content`` is
        ``{"flow": <markdown table>}`` on success or ``{"error": ...}``
        on failure; ``response`` is the raw API response object, or
        ``None`` when an exception occurred.
    """
    try:
        filled_prompt = prompt.format(
            question=question,
            jd=jd,
            company_name=company_name,
            experience_level=experience_level,
            conversation=conversation,
        )
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": filled_prompt}],
        )

        markdown_table = parse_markdown_table_from_response(
            response.choices[0].message.content
        )
        if markdown_table:
            parsed_content = {"flow": markdown_table}
        else:
            parsed_content = {"error": "Failed to parse markdown table"}
        return parsed_content, response
    except Exception as e:
        print(f"λ΅λ³ νλ¦ μμ± λλ νμ± μ€ μ€λ₯ λ°μ: {e}")
        return {"error": f"Failed to generate or parse flow: {str(e)}"}, None
|
|
|
|
|
if __name__ == "__main__":
    # Smoke test: run the flow generator once against a hard-coded example.
    # (The example values are Korean sample data for a Samsung Electronics
    # front-end application; kept verbatim because they are runtime data.)
    example_input = {
        "question": "μΌμ±μ μλ₯Ό μ§μν μ΄μ μ μ
μ¬ ν μ΄λ£¨κ³ μΆμ κΏμ κΈ°μ νμμ€.",
        "jd": "μΌμ±μ μλ μΈκ³μ μΈ κΈ°μ κΈ°μ
μΌλ‘, λ€μν λΆμΌμμ μ λμ μΈ κΈ°μ μ κ°λ°νκ³ μμ΅λλ€. νλ‘ νΈμλ κ°λ°μλ‘ μ
μ¬νλ©΄ λ€μν νλ‘μ νΈμ μ°Έμ¬νλ©°, μ΅μ κΈ°μ μ μ μ©νμ¬ μ¬μ©μ κ²½νμ κ°μ ν μ μμ΅λλ€.",
        "company_name": "μΌμ±μ μ",
        "experience_level": "μ μ
",
        "conversation": "User: μλ
νμΈμ, μΌμ±μ μ νλ‘ νΈμλ κ°λ° μ§λ¬΄μ μ§μνλ €κ³ ν©λλ€.\nAI: λ€, μ΄λ€ μ μ΄ κΆκΈνμ κ°μ?\nUser: μκΈ°μκ°μμ μ΄λ€ κ²½νμ κ°μ‘°νλ©΄ μ’μκΉμ?\nAI: νλ‘μ νΈ κ²½νμμ μ¬μ©ν κΈ°μ μ€νκ³Ό μ±κ³Όλ₯Ό ꡬ체μ μΌλ‘ μμ±νλ κ²μ΄ μ’μ΅λλ€."
    }

    # Discard the raw API response; print only the parsed flow (or error)
    # as pretty JSON, preserving non-ASCII characters.
    flow_json, _ = generate_answer_flow(**example_input)
    print(json.dumps(flow_json, indent=2, ensure_ascii=False))
|
|
|