# chat-and-answer-completion / llm_functions.py
from openai import OpenAI
import yaml
import os
import json
# ํด๋ผ์ด์–ธํŠธ ๋ฐ ํ”„๋กฌํ”„ํŠธ ์ดˆ๊ธฐํ™”
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompts = yaml.safe_load(file)
except Exception as e:
    print(f"Warning: failed to load prompt.yaml; falling back to default prompts. Error: {e}")
    prompts = {
        "Interviewer": "You are a job interviewer.",
        "Student": "You are a job applicant.",
        "CoverLetter": "Write a cover letter based on the conversation."
    }
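
# prompt.yaml is expected to map each role name to a str.format template.
# The placeholder names below are illustrative, not the real schema, except
# that "CoverLetter" demonstrably takes {conversation}, {question}, {flow},
# and {word_limit} (see generate_cover_letter_response):
#
#   Interviewer: |
#     You are an interviewer at {company} hiring for {position}. ...
#   Student: |
#     You are an applicant applying for {position}. ...
#   CoverLetter: |
#     Given the conversation below, answer "{question}" within {word_limit}
#     characters, following this flow: {flow}.
#     {conversation}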
def get_interviewer_response(example_info):
    """
    Streams the interviewer's response, including the interview progress.
    """
    # Bundle every variable needed for prompt formatting into one kwargs dict
    format_kwargs = {
        **example_info
    }
    system_prompt = prompts.get("Interviewer", "").format(**format_kwargs)
    # Debug dump of the rendered prompt
    with open("system_prompt.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)
    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]
    # for role, content in messages:
    #     conversation.append({"role": role, "content": content})
    response_stream = client.chat.completions.create(
        model="gpt-4.1",
        messages=conversation,
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""
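
# Usage sketch (illustrative): the keys of `example_info` must match the
# placeholders in prompt.yaml's "Interviewer" template, so the keys below are
# assumptions, not the real schema:
#
#   for token in get_interviewer_response({"company": "...", "position": "..."}):
#       print(token, end="", flush=True)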
def get_student_response(example_info):
    """Streams the student's AI answer."""
    system_prompt = prompts.get("Student", "").format(**example_info)
    conversation = [{"role": "system", "content": "You must generate the response in json format."}]
    # Debug dump of the rendered prompt
    with open("student_input.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)
    # for speaker, content in history:
    #     conversation.append({"role": "user", "content": f"{speaker}: {content}"})
    conversation.append({"role": "user", "content": system_prompt})
    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""
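
# Both streaming functions above instruct the model to answer in JSON. The
# helper below is a convenience sketch (not part of the original interface)
# for callers that want the parsed object rather than a live token stream.
def parse_streamed_json(token_stream):
    """Drain a token generator and parse the accumulated text as JSON.

    Raises json.JSONDecodeError if the model strays from valid JSON
    (e.g. wraps its answer in Markdown code fences).
    """
    text = "".join(token_stream)
    return json.loads(text)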
def generate_cover_letter_response(question, conversation_history, example_info, flow, word_limit):
    """
    Streams the cover-letter answer, including the writing progress.
    """
    conversation_text = "\n".join(
        f"{speaker}: {content}" for speaker, content in conversation_history
    )
    # Prompt that also asks the model to report its progress
    prompt = prompts.get("CoverLetter", "").format(
        conversation=conversation_text,
        question=question,
        flow=flow,
        word_limit=word_limit,
        **example_info
    )
    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""
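
if __name__ == "__main__":
    # Smoke-test sketch: stream a cover-letter answer to stdout. The
    # example_info keys and every value here are placeholders; in a real run
    # they must match the placeholders in prompt.yaml's "CoverLetter" template.
    demo_info = {"company": "ExampleCorp", "position": "ML Engineer"}
    demo_history = [
        ("Interviewer", "Why do you want this role?"),
        ("Student", "I enjoy building ML products end to end."),
    ]
    for token in generate_cover_letter_response(
        question="Describe your motivation for applying.",
        conversation_history=demo_history,
        example_info=demo_info,
        flow="motivation -> experience -> fit",
        word_limit=500,
    ):
        print(token, end="", flush=True)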