File size: 5,284 Bytes
2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 2648ef7 2a6f904 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
import os
import gradio as gr
from openai import OpenAI
# Module-level OpenAI client shared by all requests in this app.
# Reads the API key from the OPENAI_API_KEY environment variable; if it is
# unset, os.getenv returns None and API calls will fail at request time.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def make_system_message(system_message, role, jobad, education, workexp, skills, ask_sugg):
    """Assemble the system prompt from the base instructions and the CV fields.

    Returns a single string: the base instructions, a blank line, then one
    labelled line per CV field, each newline-terminated.  When ask_sugg is
    truthy, a sentence requesting skill suggestions is appended.
    """
    sections = [
        f"{system_message}\n",
        f"Role, Industry and Type of Organization: {role}",
        f"Job Ad Responsibilities and Key Requirements: {jobad}",
        f"Education, Training and Certifications: {education}",
        f"Work Experience: {workexp}",
        f"Skills: {skills}",
    ]
    prompt = "\n".join(sections) + "\n"
    if ask_sugg:
        prompt += " The user is also asking for suggestions of skills related to this role."
    return prompt
def stream_chat(
    message,
    history,  # list[list[str, str]] from gr.Chatbot
    system_message,
    role,
    jobad,
    education,
    workexp,
    skills,
    ask_sugg,
    max_tokens,
    temp,
    top_p,
):
    """Stream an assistant reply, yielding the updated chat history as it grows.

    Parameters mirror the Gradio inputs: the new user message, the prior
    chat history (list of [user, assistant] pairs), the prompt fields
    consumed by make_system_message, and the sampling controls
    (max_tokens / temp / top_p, coerced to int/float since sliders may
    deliver them loosely typed).

    Yields the full history list on every new token so the Chatbot
    component re-renders incrementally.  API/network errors are caught
    and surfaced as the assistant's reply instead of crashing the UI.
    """
    history = history or []

    # Guard: ignore empty/whitespace-only submissions instead of sending a
    # blank user turn to the API.
    if not (message and message.strip()):
        yield history
        return

    # 1) Build the system prompt, replay prior turns, then the new message.
    sys_msg = make_system_message(system_message, role, jobad, education, workexp, skills, ask_sugg)
    messages = [{"role": "system", "content": sys_msg}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # 2) Optimistically show the assistant "typing", then stream tokens in.
    running_reply = ""
    running_history = history + [[message, ""]]
    yield running_history
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",  # adjust if needed
            messages=messages,
            max_tokens=int(max_tokens),
            temperature=float(temp),
            top_p=float(top_p),
            stream=True,
        )
        for chunk in response:
            # Some chunks (e.g. the final one) carry no content delta.
            delta = chunk.choices[0].delta if chunk.choices else None
            if delta and delta.content:
                running_reply += delta.content
                running_history[-1][1] = running_reply
                # Yield the whole history each time so the UI updates.
                yield running_history
    except Exception as e:  # broad by design: show any API failure in-chat
        running_history[-1][1] = f"❌ An error occurred: {str(e)}"
        yield running_history
# ---------------------------------------------------------------------------
# Gradio UI: CV input fields, sampling controls, and a streaming chat window
# wired to stream_chat above.
# ---------------------------------------------------------------------------

# Default system prompt; hoisted so the Clear button can restore it instead
# of wiping the Instructions box (the original do_clear also returned only
# 11 values for 12 output components, which broke the Clear handler).
default_instructions = (
    "You are a friendly Chatbot, a career coach and a talented copywriter. "
    "You are trying to help a user customize their resume according to a specific role, employer organization and job Ad "
    "- based on user input. Include tips if some items are missing."
)

with gr.Blocks(title="Resumize – Customize your CV!") as demo:
    gr.Markdown("""
# Resumize – Customize your CV!
This app customizes your resume to best suit a specific role, industry, employer and job ad.
Powered by OpenAI GPT-4o and domain expertise.
""")
    chatbot = gr.Chatbot(height=400)
    with gr.Column():
        instructions = gr.Textbox(
            value=default_instructions,
            label="Instructions to Bot",
            lines=4,
        )
        role = gr.Textbox(label="Role, Industry and Employer", placeholder="Describe the role, industry and employer you are applying to.")
        jobad = gr.Textbox(label="Job Ad Responsibilities and Key Requirements", placeholder="Paste/describe the job ad responsibilities and key requirements", lines=4)
        education = gr.Textbox(label="Your Education, Certifications, Training, etc.", placeholder="Degrees, diplomas, certifications, courses", lines=3)
        workexp = gr.Textbox(label="Your Work Experience", placeholder="Roles, responsibilities, achievements", lines=4)
        skills = gr.Textbox(label="Skills", placeholder="List your key skills that match this job or ask for suggestions")
        ask_sugg = gr.Checkbox(label="Ask for Skills Suggestions", value=False)
    with gr.Row():
        max_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens")
        temp = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus)")
    msg = gr.Textbox(label="Type your message here...", placeholder="e.g., Draft a tailored resume summary")
    with gr.Row():
        send = gr.Button("Send", variant="primary")
        clear = gr.Button("Clear")

    # Wire up sending (submit or button) → stream_chat → chatbot, then empty
    # the message box once the stream completes.
    inputs = [msg, chatbot, instructions, role, jobad, education, workexp, skills, ask_sugg, max_tokens, temp, top_p]
    outputs = [chatbot]
    msg.submit(stream_chat, inputs, outputs).then(lambda: "", None, msg)
    send.click(stream_chat, inputs, outputs).then(lambda: "", None, msg)

    # Reset every component: chat, message box, instructions (back to the
    # default prompt), the five CV fields, the checkbox, and the sliders.
    def do_clear():
        return [], "", default_instructions, "", "", "", "", "", False, 512, 0.7, 0.95

    clear.click(
        do_clear,
        inputs=[],
        outputs=[chatbot, msg, instructions, role, jobad, education, workexp, skills, ask_sugg, max_tokens, temp, top_p],
    )

if __name__ == "__main__":
    demo.launch()
|