|
|
from openai import OpenAI |
|
|
import yaml |
|
|
import os |
|
|
import json |
|
|
|
|
|
|
|
|
# Chat-completions client; the API key is read from the environment so no
# secret is hard-coded in the source.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Load the role prompt templates (Interviewer / Student / CoverLetter) from
# prompt.yaml located next to this module.  On any failure — missing file,
# malformed YAML, or a document that is not a mapping — fall back to minimal
# built-in prompts so the module stays importable.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompts = yaml.safe_load(file)
    # An empty or non-mapping YAML document makes safe_load return None (or
    # a scalar), which would crash prompts.get(...) much later with an
    # AttributeError; treat it the same as a load failure here.
    if not isinstance(prompts, dict):
        raise ValueError("prompt.yaml did not contain a mapping of prompts")
except Exception as e:
    print(f"Warning: prompt.yaml ๋ก๋ ์คํจ. ๊ธฐ๋ณธ ํ๋กฌํํธ๋ฅผ ์ฌ์ฉํฉ๋๋ค. ์ค๋ฅ: {e}")
    prompts = {
        "Interviewer": "You are a job interviewer.",
        "Student": "You are a job applicant.",
        "CoverLetter": "Write a cover letter based on the conversation.",
    }
|
|
|
|
|
def get_interviewer_response(example_info):
    """Stream the interviewer's next utterance from the chat model.

    Renders the "Interviewer" prompt template with ``example_info`` (which
    carries the interview context, including progress), sends it as a single
    user turn, and yields the model's reply incrementally.

    Args:
        example_info: Mapping of placeholder names to values used to fill
            the Interviewer prompt template via ``str.format``.

    Yields:
        str: Text fragments of the streamed reply; empty string for chunks
        that carry no text delta.
    """
    system_prompt = prompts.get("Interviewer", "").format(**example_info)

    # Debug artifact: persist the rendered prompt so it can be inspected
    # after the fact.  NOTE(review): written to the process CWD — confirm
    # this side effect is still wanted in production.
    with open("system_prompt.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4.1",
        messages=conversation,
        stream=True,
    )
    for chunk in response_stream:
        # Some stream chunks (e.g. a usage-only final chunk) have an empty
        # choices list; skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        yield chunk.choices[0].delta.content or ""
|
|
|
|
|
def get_student_response(example_info):
    """Stream the student's (applicant's) AI-generated answer.

    Renders the "Student" prompt template with ``example_info``, sends it
    as a single user turn, and yields the model's reply incrementally.

    Args:
        example_info: Mapping of placeholder names to values used to fill
            the Student prompt template via ``str.format``.

    Yields:
        str: Text fragments of the streamed reply; empty string for chunks
        that carry no text delta.
    """
    system_prompt = prompts.get("Student", "").format(**example_info)

    # Debug artifact: persist the rendered prompt so it can be inspected
    # after the fact.  NOTE(review): written to the process CWD — confirm
    # this side effect is still wanted in production.
    with open("student_input.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        stream=True,
    )
    for chunk in response_stream:
        # Some stream chunks (e.g. a usage-only final chunk) have an empty
        # choices list; skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        yield chunk.choices[0].delta.content or ""
|
|
|
|
|
def generate_cover_letter_response(question, conversation_history, example_info, flow, word_limit):
    """Stream a cover-letter answer built from the interview transcript.

    Flattens ``conversation_history`` into a "speaker: content" transcript,
    renders the "CoverLetter" prompt template with the transcript plus the
    question/flow/word-limit parameters (and any extra fields from
    ``example_info``, which includes progress), and yields the model's
    reply incrementally.

    Args:
        question: The cover-letter question being answered.
        conversation_history: Iterable of ``(speaker, content)`` pairs.
        example_info: Mapping of extra placeholder values for the template.
        flow: Desired narrative flow/outline for the answer.
        word_limit: Target length constraint passed to the template.

    Yields:
        str: Text fragments of the streamed reply; empty string for chunks
        that carry no text delta.
    """
    conversation_text = "\n".join(
        f"{speaker}: {content}" for speaker, content in conversation_history
    )

    prompt = prompts.get("CoverLetter", "").format(
        conversation=conversation_text,
        question=question,
        flow=flow,
        word_limit=word_limit,
        **example_info
    )

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    for chunk in response_stream:
        # Some stream chunks (e.g. a usage-only final chunk) have an empty
        # choices list; skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        yield chunk.choices[0].delta.content or ""