# chat-and-answer-completion / chat / llm_functions.py
# (Hugging Face Spaces page residue: uploaded by kyle8581, commit message
# "update", revision d900245 — kept here as a comment so the file parses.)
from openai import OpenAI
import yaml
import os
import json
from dotenv import load_dotenv
load_dotenv()

# Initialize the OpenAI client (reads OPENAI_API_KEY from the environment)
# and load the prompt templates shipped next to this module.
client = OpenAI()
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompts = yaml.safe_load(file)
# Narrowed from a blanket `except Exception`: only file-access and YAML
# parsing failures should trigger the fallback; programming errors must surface.
except (OSError, yaml.YAMLError) as e:
    print(f"Warning: prompt.yaml ๋กœ๋“œ ์‹คํŒจ. ๊ธฐ๋ณธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์˜ค๋ฅ˜: {e}")
    # Minimal built-in prompts so the module stays usable without prompt.yaml.
    prompts = {
        "Interviewer": "You are a job interviewer.",
        "Student": "You are a job applicant.",
        "CoverLetter": "Write a cover letter based on the conversation.",
        "Memory": "Create a memory based on the conversation history."
    }
def get_interviewer_response(example_info):
    """Stream the interviewer's reply (including a progress indicator).

    Args:
        example_info: dict of fields used to fill the "Interviewer"
            prompt template's placeholders.

    Yields:
        str: incremental text chunks from the model ("" for empty deltas).
    """
    system_prompt = prompts.get("Interviewer", "").format(**example_info)
    # Dump the rendered prompt to disk for debugging/inspection.
    with open("system_prompt.txt", "w", encoding='utf-8') as prompt_file:
        prompt_file.write(system_prompt)
    messages = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        stream=True,
    )
    for part in stream:
        yield part.choices[0].delta.content or ""
def get_student_response(example_info):
    """Stream the applicant's (student's) AI-generated answer.

    Args:
        example_info: dict of fields used to fill the "Student"
            prompt template's placeholders.

    Yields:
        str: incremental text chunks from the model ("" for empty deltas).
    """
    system_prompt = prompts.get("Student", "").format(**example_info)
    # Dump the rendered prompt to disk for debugging/inspection.
    with open("student_input.txt", "w", encoding='utf-8') as prompt_file:
        prompt_file.write(system_prompt)
    messages = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": f"{system_prompt}"},
    ]
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        stream=True,
    )
    for part in stream:
        yield part.choices[0].delta.content or ""
def generate_cover_letter_response(question, conversation_history, example_info, flow, word_limit):
    """Stream a cover-letter answer (including a progress indicator).

    Args:
        question: the cover-letter question being answered.
        conversation_history: list of (speaker, content) pairs; when empty,
            example_info['conversation'] is used instead.
        example_info: dict with company/job context and fallback defaults.
        flow: guideline/outline text injected into the prompt.
        word_limit: word limit passed through to the prompt.

    Yields:
        str: incremental text chunks from the model ("" for empty deltas).
    """
    # Prefer the live interview transcript; fall back to the pre-baked one.
    if conversation_history:
        transcript = "\n".join(
            f"{speaker}: {content}" for speaker, content in conversation_history
        )
    else:
        transcript = example_info.get('conversation', '')
    prompt = prompts.get("CoverLetter", "").format(
        question=question,
        guideline=flow,
        company_name=example_info.get('company_name', 'ํšŒ์‚ฌ'),
        job_position=example_info.get('job_position', 'ํ•ด๋‹น ์ง๋ฌด'),
        experience_level=example_info.get('experience_level', '์‹ ์ž…'),
        word_limit=word_limit,
        conversation=transcript,
    )
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    for part in stream:
        yield part.choices[0].delta.content or ""
def generate_memory(conversation_history, current_memory=""):
    """Stream a memory summary built from the conversation history.

    Args:
        conversation_history: list of (speaker, content) pairs, or an
            already-formatted transcript string.
        current_memory: existing memory text to update (default "").

    Yields:
        str: incremental text chunks from the model ("" for empty deltas).

    Returns:
        str: the parsed 'memory' field of a trailing JSON object, or the
            full raw response. NOTE: because this is a generator, the return
            value is only observable as ``StopIteration.value`` — plain
            ``for`` loops discard it.
    """
    import re  # local import: `re` is not imported at module level

    # Normalize the history into a single transcript string.
    if isinstance(conversation_history, list):
        conversation_text = "\n".join(
            f"{speaker}: {content}" for speaker, content in conversation_history
        )
    else:
        conversation_text = conversation_history
    prompt = prompts.get("Memory", "").format(
        conversation=conversation_text,
        memory=current_memory
    )
    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )
    full_response = ""
    for chunk in response_stream:
        chunk_content = chunk.choices[0].delta.content or ""
        full_response += chunk_content
        yield chunk_content
    # Try to extract a {"memory": ...} JSON object from the final response.
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt);
    # only a malformed JSON payload should fall back to the raw text.
    # (`json` is imported at module level; the redundant local import is gone.)
    try:
        json_match = re.search(r'\{.*\}', full_response, re.DOTALL)
        if json_match:
            parsed_data = json.loads(json_match.group())
            return parsed_data.get('memory', full_response)
    except json.JSONDecodeError:
        pass
    return full_response