from openai import OpenAI
import yaml
import os
import json
import re

from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

client = OpenAI()

# Load prompt templates from prompt.yaml next to this file; fall back to
# minimal defaults if the file is missing or malformed.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompts = yaml.safe_load(file)
except Exception as e:
    print(f"Warning: failed to load prompt.yaml; using default prompts. Error: {e}")
    prompts = {
        "Interviewer": "You are a job interviewer.",
        "Student": "You are a job applicant.",
        "CoverLetter": "Write a cover letter based on the conversation.",
        "Memory": "Create a memory based on the conversation history."
    }

def get_interviewer_response(example_info):
    """Streams the interviewer's response, including the interview progress."""
    system_prompt = prompts.get("Interviewer", "").format(**example_info)

    # Dump the rendered system prompt to a file for debugging.
    with open("system_prompt.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""
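
# A minimal consumption sketch (an addition for illustration, not part of the
# original module): any of the streaming generators in this module can be
# drained into a single string, e.g. for tests or non-streaming callers.
def collect_stream(stream):
    """Join every streamed chunk into one string."""
    return "".join(stream)
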
def get_student_response(example_info):
    """Streams the student's (applicant's) AI-generated answer."""
    system_prompt = prompts.get("Student", "").format(**example_info)

    # Dump the rendered prompt to a file for debugging.
    with open("student_input.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""

def generate_cover_letter_response(question, conversation_history, example_info, flow, word_limit):
    """Streams a cover-letter answer, including the progress."""
    if conversation_history:
        conversation_text = "\n".join(f"{speaker}: {content}" for speaker, content in conversation_history)
    else:
        conversation_text = example_info.get('conversation', '')

    company_name = example_info.get('company_name', 'the company')
    job_position = example_info.get('job_position', 'the position')
    experience_level = example_info.get('experience_level', 'entry-level')

    prompt = prompts.get("CoverLetter", "").format(
        question=question,
        guideline=flow,
        company_name=company_name,
        job_position=job_position,
        experience_level=experience_level,
        word_limit=word_limit,
        conversation=conversation_text
    )

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )
    for chunk in response_stream:
        yield chunk.choices[0].delta.content or ""

def generate_memory(conversation_history, current_memory=""):
    """Streams a memory built from the conversation history.

    Yields response chunks as they arrive; once exhausted, the parsed memory
    string is also returned (surfacing as StopIteration.value to the caller).
    """
    if isinstance(conversation_history, list):
        conversation_text = "\n".join(f"{speaker}: {content}" for speaker, content in conversation_history)
    else:
        conversation_text = conversation_history

    prompt = prompts.get("Memory", "").format(
        conversation=conversation_text,
        memory=current_memory
    )

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )

    full_response = ""
    for chunk in response_stream:
        chunk_content = chunk.choices[0].delta.content or ""
        full_response += chunk_content
        yield chunk_content

    # Extract the 'memory' field from the model's JSON output; fall back to
    # the raw text if no JSON object is found or parsing fails.
    try:
        json_match = re.search(r'\{.*\}', full_response, re.DOTALL)
        if json_match:
            parsed_data = json.loads(json_match.group())
            return parsed_data.get('memory', full_response)
    except Exception:
        pass

    return full_response
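
# A minimal usage sketch (added for illustration; not part of the original
# module). The example_info values are hypothetical, OPENAI_API_KEY must be
# set, and a real prompt.yaml may require different template keys.
if __name__ == "__main__":
    example_info = {
        "company_name": "Acme Corp",        # hypothetical
        "job_position": "Data Analyst",     # hypothetical
        "experience_level": "entry-level",  # hypothetical
    }

    # Stream the interviewer's opening turn to stdout as it arrives.
    for piece in get_interviewer_response(example_info):
        print(piece, end="", flush=True)
    print()

    # Drain generate_memory manually to capture the value it returns via
    # StopIteration (a plain for-loop would discard it).
    history = [("Interviewer", "Tell me about yourself."),
               ("Student", "I studied computer science.")]
    gen = generate_memory(history)
    while True:
        try:
            print(next(gen), end="", flush=True)
        except StopIteration as stop:
            final_memory = stop.value
            break
    print("\nFinal memory:", final_memory)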