import os
import sys
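
# Make the project root importable so that the sibling packages used below
# (chat, utils, guide_generation, answer_flow_generation) resolve when this
# app is launched directly.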
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

from dotenv import load_dotenv
import gradio as gr
import yaml
import json
import re

from chat.llm_functions import get_interviewer_response, get_student_response, generate_cover_letter_response, generate_memory
from utils import parse_json_from_response
from guide_generation.llm_functions import generate_guide as create_guide_from_llm
from answer_flow_generation.llm_functions import generate_answer_flow
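
# Load API keys and other settings from a local .env file.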
load_dotenv()
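
# Default/example info used to pre-fill the UI and as a fallback for missing fields.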
with open("example_info.json", "r", encoding='utf-8') as f:
    default_info = json.load(f)

if 'word_limit' not in default_info:
    default_info['word_limit'] = 300


def user_submit(message, history):
    """Process the user's input and append it to the chat history."""
    if not message.strip():
        return "", history
    history.append([message, None])
    return "", history


def clean_markdown_response(text):
    """
    Strip markdown code fences from an LLM response and return only the content.

    Args:
        text (str): raw LLM response text

    Returns:
        str: cleaned text
    """
    if not text:
        return text

    # Prefer an explicit fenced block tagged "markdown" if one is present.
    markdown_match = re.search(r"```(?:markdown)?\s*([\s\S]*?)\s*```", text)
    if markdown_match:
        return markdown_match.group(1).strip()

    # Otherwise fall back to any generic fenced block.
    code_match = re.search(r"```\s*([\s\S]*?)\s*```", text)
    if code_match:
        return code_match.group(1).strip()

    return text.strip()


def bot_response(history, shared_info, progress=gr.Progress()):
    """Generate the interviewer's reply and update the cover letter completion progress."""
    if not history or history[-1][1] is not None:
        yield history, gr.update(), gr.update()
        return

    # Flatten the chat history into a plain-text transcript for the prompt.
    conversation_str = ""
    for h in history:
        conversation_str += f"Student: {h[0]}\n"
        if h[1]:
            conversation_str += f"AI: {h[1]}\n"

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    if 'word_limit' not in format_info:
        format_info['word_limit'] = 300

    if 'memory' not in format_info:
        format_info['memory'] = ""
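
    # Stream the interviewer's reply chunk by chunk so the chat UI updates live.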
    history[-1][1] = ""
    full_response = ""
    for chunk in get_interviewer_response(format_info):
        full_response += chunk
        history[-1][1] = full_response
        yield history, gr.update(), gr.update()
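
    # The complete response is expected to be JSON with "answer", "progress" (0-100)
    # and "reasoning_for_progress" fields.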
    final_data = parse_json_from_response(full_response)
    final_progress_update = gr.update()
    final_reason_update = gr.update()
    if final_data:
        history[-1][1] = final_data.get("answer", "Failed to process the response.")
        final_progress = final_data.get("progress", 0)
        reasoning = final_data.get("reasoning_for_progress", "")

        if isinstance(final_progress, int) and 0 <= final_progress <= 100:
            progress(final_progress / 100)
            final_progress_update = f"Cover letter completion: {final_progress}%"
            if reasoning:
                final_reason_update = gr.update(value=f"**Progress analysis:** {reasoning}", visible=True)
            else:
                final_reason_update = gr.update(visible=False)

            if final_progress >= 100:
                history.append([None, "The interview is complete. Please move to the cover letter writing tab."])

    yield history, final_progress_update, final_reason_update


def generate_ai_reply(history, shared_info, progress=gr.Progress()):
    """Generate an answer on the student's behalf, then have the interviewer ask a follow-up question."""
    if not history or not history[-1][1]:
        yield history, gr.update(), gr.update()
        return

    conversation_str = ""
    for h in history:
        conversation_str += f"Student: {h[0]}\n"
        if h[1]:
            conversation_str += f"AI: {h[1]}\n"

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    if 'word_limit' not in format_info:
        format_info['word_limit'] = 300

    if 'memory' not in format_info:
        format_info['memory'] = ""
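
    # Stream the generated student answer into the user slot of a new chat turn.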
    student_answer_json = ""
    history.append(["", None])
    for chunk in get_student_response(format_info):
        student_answer_json += chunk
        parsed_data = parse_json_from_response(student_answer_json)
        if parsed_data:
            history[-1][0] = parsed_data.get("answer", "")
        else:
            history[-1][0] = student_answer_json
        yield history, gr.update(), gr.update()

    final_data = parse_json_from_response(student_answer_json)
    if final_data:
        history[-1][0] = final_data.get("answer", "Failed to process the response.")
    yield history, gr.update(), gr.update()
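
    # Hand the turn back to the interviewer for a follow-up question.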
    yield from bot_response(history, shared_info, progress=progress)


def generate_all_cover_letters(history, shared_info, progress=gr.Progress()):
    """Generate an answer for every cover letter question and report overall progress."""
    if not history:
        empty_outputs = [gr.update(value="There is no interview conversation yet.")] * len(shared_info.get('questions', []))
        empty_guidelines = [gr.update(value="")] * len(shared_info.get('questions', []))
        yield empty_outputs + empty_guidelines + [gr.update(), gr.update()]
        return

    conversation_str = ""
    for h in history:
        if h[0]: conversation_str += f"Student: {h[0]}\n"
        if h[1]: conversation_str += f"AI: {h[1]}\n"

    total_questions = len(shared_info.get('questions', []))
    outputs = [""] * total_questions
    guidelines = [""] * total_questions

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    for i, question in enumerate(shared_info.get('questions', [])):
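        # Step 1: generate an answer flow (outline) for this question.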
        progress_text = f"Cover letter progress: {int((i / total_questions) * 40)}% (generating answer flow...)"
        yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(value=progress_text, visible=True), gr.update()]

        flow_result, _ = generate_answer_flow(
            question=question,
            jd=format_info.get('jd', ''),
            company_name=format_info.get('company_name', ''),
            experience_level=format_info.get('experience_level', 'entry-level'),
            conversation=conversation_str
        )

        flow_text = flow_result.get('flow', '') if flow_result else ''
        guidelines[i] = flow_text
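
        # Step 2: write the answer itself, streaming partial output into the UI.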
        progress_text = f"Cover letter progress: {int((i / total_questions) * 40 + 30)}% (writing answer...)"
        yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(value=progress_text, visible=True), gr.update()]

        full_response = ""
        word_limit = shared_info.get('word_limit', 300)
        for chunk in generate_cover_letter_response(question, [], format_info, flow_text, word_limit):
            full_response += chunk
            parsed_data = parse_json_from_response(full_response)
            if parsed_data and 'answer' in parsed_data:
                cleaned_answer = clean_markdown_response(parsed_data['answer'])
                outputs[i] = cleaned_answer
            else:
                cleaned_response = clean_markdown_response(full_response)
                outputs[i] = cleaned_response

            overall_progress_val = (i + 0.75) / total_questions * 0.7
            progress_text = f"Cover letter progress: {int(overall_progress_val * 100)}%"
            yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(value=progress_text, visible=True), gr.update()]
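
        # Keep the cleanly parsed final answer once streaming has finished.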
        final_data = parse_json_from_response(full_response)
        if final_data and 'answer' in final_data:
            cleaned_answer = clean_markdown_response(final_data['answer'])
            outputs[i] = cleaned_answer
        else:
            cleaned_response = clean_markdown_response(full_response)
            outputs[i] = cleaned_response
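
    # Step 3: condense the conversation into a memory note shown in the UI.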
    progress_text = "Cover letter progress: 85% (generating conversation memory...)"
    yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(value=progress_text, visible=True), gr.update()]

    memory_content = ""
    current_memory = shared_info.get('memory', '')
    for chunk in generate_memory(conversation_str, current_memory):
        memory_content += chunk

    memory_text = memory_content
    try:
        parsed_memory = parse_json_from_response(memory_content)
        if parsed_memory and 'memory' in parsed_memory:
            memory_text = parsed_memory['memory']
    except Exception:
        pass

    progress_text = "Cover letter progress: 100% (done)"
    yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(value=progress_text, visible=True), gr.update(value=memory_text)]

    yield [gr.update(value=o) for o in outputs] + [gr.update(value=g) for g in guidelines] + [gr.update(visible=False), gr.update(value=memory_text)]


def update_guide_and_info(company, position, jd, questions_str, word_limit):
    """Generate a guide from the form inputs and refresh the shared info state."""
    guide_json, _ = create_guide_from_llm(questions_str, jd, company, "entry-level")

    if guide_json and "guide" in guide_json:
        guide_text = guide_json["guide"]
    else:
        guide_text = "Failed to generate the guide. Please check your inputs."

    new_info = default_info.copy()
    new_info.update({
        "company_name": company,
        "position_title": position,
        "jd": jd,
        "questions": [q.strip() for q in questions_str.strip().split('\n') if q.strip()],
        "guide": guide_text,
        "word_limit": word_limit,
        "memory": ""
    })

    return new_info, guide_text
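

# Gradio UI: three tabs (guide generation, interview chat, cover letter writing)
# plus the event wiring that connects them to the handlers above.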
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    shared_info = gr.State(default_info)

    with gr.Tabs() as tabs:
        with gr.TabItem("Guide Generation", id=0):
            gr.Markdown("## Cover Letter Information")
            gr.Markdown("Enter the information needed for the interview simulation and press the 'Generate Guide' button.")
            with gr.Row():
                company_name_input = gr.Textbox(label="Company name", value=default_info.get("company_name"))
                position_title_input = gr.Textbox(label="Position title", value=default_info.get("position_title"))
            jd_input = gr.Textbox(label="Job Description (JD)", lines=5, value=default_info.get("jd"))
            questions_input = gr.Textbox(label="Cover letter questions (one per line)", lines=3, value="\n".join(default_info.get("questions", [])))

            with gr.Row():
                word_limit_input = gr.Number(
                    label="Cover letter length limit",
                    value=300,
                    minimum=100,
                    maximum=1000,
                    step=50,
                    info="Set the length limit for each cover letter question."
                )

            generate_guide_btn = gr.Button("Generate Guide", variant="primary")
            guide_output = gr.Markdown(label="Generated Guide", value=f"**Guide:**\n{default_info.get('guide')}")

        with gr.TabItem("Interview Chat", id=1):
            gr.Markdown("## Interview Simulation")
            gr.Markdown("Answer the interviewer's questions, or press the 'Generate AI Answer' button. The interview ends once the interviewer judges the cover letter completion to be 100%.")

            with gr.Row():
                progress_display = gr.Markdown("Cover letter completion: 0%")
                reason_display = gr.Markdown("", visible=False)
            chatbot = gr.Chatbot(label="Interview Chat", bubble_full_width=False, height=500)
            msg = gr.Textbox(label="Message", placeholder="Type a message...", lines=2)
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary")
                ai_reply_btn = gr.Button("Generate AI Answer", variant="secondary")
                clear_btn = gr.Button("Reset")

        with gr.TabItem("Cover Letter Writing", id=2):
            gr.Markdown("## Cover Letter Answer Generation")
            gr.Markdown("Once the interview is complete, answers are generated for each question from the conversation.")

            generate_btn = gr.Button("Start Cover Letter Generation", variant="primary", size="lg")
            cover_letter_progress_display = gr.Markdown("", visible=False)
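
            # One accordion per question, each with an answer tab and a guideline tab.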
            cover_letter_outputs = []
            guideline_outputs = []
            for i, question in enumerate(default_info.get('questions', [])):
                with gr.Accordion(f"Question {i+1}: {question[:50]}...", open=True):
                    gr.Markdown(f"**{question}**")

                    with gr.Tabs():
                        with gr.TabItem("Generated Answer"):
                            output = gr.Textbox(
                                label=f"Answer {i+1}",
                                lines=8,
                                max_lines=20,
                                interactive=False,
                                show_copy_button=True,
                                placeholder="The generated cover letter answer will appear here...",
                                info="For long answers, scroll to see the full content."
                            )
                            cover_letter_outputs.append(output)

                        with gr.TabItem("Answer Guideline"):
                            guideline = gr.Markdown(value="The answer guideline will appear here once it is generated.")
                            guideline_outputs.append(guideline)

            with gr.Accordion("Conversation Memory", open=False):
                gr.Markdown("Memory generated from the interview conversation.")
                memory_display = gr.Markdown(value="The conversation memory will appear here once it is generated.", label="Conversation Memory")
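
    # Event wiring: user messages are echoed immediately, then the interviewer's
    # reply streams in via .then(); the other buttons call their handlers directly.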
    generate_guide_btn.click(
        fn=update_guide_and_info,
        inputs=[company_name_input, position_title_input, jd_input, questions_input, word_limit_input],
        outputs=[shared_info, guide_output]
    )

    submit_btn.click(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display, reason_display])
    msg.submit(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display, reason_display])
    ai_reply_btn.click(generate_ai_reply, [chatbot, shared_info], [chatbot, progress_display, reason_display])
    clear_btn.click(lambda: ([], "Cover letter completion: 0%", ""), None, [chatbot, progress_display, reason_display], queue=False)
    generate_btn.click(generate_all_cover_letters, [chatbot, shared_info], cover_letter_outputs + guideline_outputs + [cover_letter_progress_display, memory_display])


if __name__ == "__main__":
    demo.launch(share=True)