File size: 3,268 Bytes
2b267d0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
from openai import OpenAI
import yaml
import os
import json

# ํด๋ผ์ด์–ธํŠธ ๋ฐ ํ”„๋กฌํ”„ํŠธ ์ดˆ๊ธฐํ™”
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    prompt_path = os.path.join(current_dir, 'prompt.yaml')
    with open(prompt_path, "r", encoding='utf-8') as file:
        prompts = yaml.safe_load(file)
except Exception as e:
    print(f"Warning: prompt.yaml ๋กœ๋“œ ์‹คํŒจ. ๊ธฐ๋ณธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์˜ค๋ฅ˜: {e}")
    prompts = {
        "Interviewer": "You are a job interviewer.",
        "Student": "You are a job applicant.",
        "CoverLetter": "Write a cover letter based on the conversation."
    }

def get_interviewer_response(example_info):
    """Stream the interviewer's response (including progress) chunk by chunk.

    Args:
        example_info: Mapping of placeholder names to values used to fill
            the "Interviewer" prompt template.

    Yields:
        str: Incremental text chunks from the model. Chunks that carry no
        content (e.g. the final stop chunk) are yielded as "".
    """
    # Fill the prompt template directly with the caller-supplied fields.
    system_prompt = prompts.get("Interviewer", "").format(**example_info)

    # NOTE(review): debug artifact — dumps the rendered prompt to disk in
    # the current working directory on every call; consider gating this
    # behind a flag or removing it.
    with open("system_prompt.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": system_prompt},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4.1",
        messages=conversation,
        stream=True,
    )
    for chunk in response_stream:
        # delta.content is None on content-less chunks; normalize to "".
        yield chunk.choices[0].delta.content or ""

def get_student_response(example_info):
    """Stream the student's (applicant's) AI answer chunk by chunk.

    Args:
        example_info: Mapping of placeholder names to values used to fill
            the "Student" prompt template.

    Yields:
        str: Incremental text chunks from the model. Chunks that carry no
        content (e.g. the final stop chunk) are yielded as "".
    """
    system_prompt = prompts.get("Student", "").format(**example_info)

    # NOTE(review): debug artifact — dumps the rendered prompt to disk in
    # the current working directory on every call; consider gating this
    # behind a flag or removing it.
    with open("student_input.txt", "w", encoding='utf-8') as f:
        f.write(system_prompt)

    conversation = [
        {"role": "system", "content": "You must generate the response in json format."},
        {"role": "user", "content": f"{system_prompt}"},
    ]

    response_stream = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        stream=True,
    )
    for chunk in response_stream:
        # delta.content is None on content-less chunks; normalize to "".
        yield chunk.choices[0].delta.content or ""

def generate_cover_letter_response(question, conversation_history, example_info, flow, word_limit):
    """Stream a cover-letter answer (with progress) chunk by chunk.

    Args:
        question: The cover-letter question to answer.
        conversation_history: Iterable of (speaker, content) pairs from the
            interview session.
        example_info: Mapping of extra placeholder names for the template.
        flow: Desired narrative flow for the answer.
        word_limit: Word-count limit passed into the template.

    Yields:
        str: Incremental text chunks from the model ("" for empty chunks).
    """
    # Flatten the interview transcript into "speaker: content" lines.
    transcript_lines = [f"{speaker}: {content}" for speaker, content in conversation_history]
    transcript = "\n".join(transcript_lines)

    # Render the cover-letter template, which also asks for progress output.
    rendered_prompt = prompts.get("CoverLetter", "").format(
        conversation=transcript,
        question=question,
        flow=flow,
        word_limit=word_limit,
        **example_info
    )

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": rendered_prompt}],
        stream=True
    )
    for piece in stream:
        delta_text = piece.choices[0].delta.content
        yield delta_text or ""