File size: 3,794 Bytes
da8882a
 
 
 
 
 
 
016231e
da8882a
 
016231e
da8882a
016231e
 
87a3780
016231e
 
da8882a
 
 
 
016231e
da8882a
 
87a3780
 
1830dee
87a3780
016231e
da8882a
 
016231e
da8882a
016231e
 
 
 
 
da8882a
 
016231e
da8882a
 
 
87a3780
da8882a
 
 
 
 
 
 
 
 
 
 
 
 
 
016231e
da8882a
 
 
 
016231e
da8882a
 
016231e
 
 
87a3780
016231e
da8882a
016231e
87a3780
da8882a
 
016231e
87a3780
da8882a
87a3780
da8882a
 
 
 
016231e
 
 
da8882a
 
 
 
016231e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import os
import gradio as gr
from openai import OpenAI

# OpenAI client, configured from the OPENAI_API_KEY environment variable.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Streaming response function
def respond(
    message,
    history: list,
    system_message,
    role,
    ad,
    requirements,
    education,
    skills,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a customized chat completion for STAR-style interview practice.

    Parameters mirror the Gradio inputs: the user's chat ``message``, the chat
    ``history``, the editable system prompt, the job-context fields (role, ad,
    requirements, education, skills), and the sampling controls. Yields the
    accumulated response text after each streamed token so Gradio renders it
    incrementally; on API failure yields a single error string instead.
    """
    # Fold the job-specific context the user supplied into the system prompt.
    enhanced_system_message = (
        f"{system_message}\n\n"
        f"Role, Industry and Employer: {role}\n"
        f"Job Ad Responsibilities: {ad}\n"
        f"Job Key Requirements: {requirements}\n"
        f"A Summary about the User's Education and Work Experience: {education}\n"
        f"Skills: {skills}\n"
    )

    # Compose the conversation. Because the interface is created with
    # type="messages", history items arrive as {"role": ..., "content": ...}
    # dicts — the original tuple unpacking would break on them. The tuple
    # branch keeps backward compatibility with tuple-style history.
    messages = [{"role": "system", "content": enhanced_system_message}]
    for item in history:
        if isinstance(item, dict):
            messages.append({"role": item["role"], "content": item["content"]})
        else:
            user_msg, assistant_msg = item
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Generate and stream response, accumulating tokens into one string.
    response_text = ""
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",  # use "gpt-4o" if needed
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        for chunk in response:
            if chunk.choices and chunk.choices[0].delta.content:
                token = chunk.choices[0].delta.content
                response_text += token
                yield response_text
    except Exception as e:
        # Surface the failure in the chat window rather than crashing the UI.
        yield f"❌ An error occurred: {str(e)}"

# Default system prompt, editable by the user in the UI.
_DEFAULT_INSTRUCTIONS = "You are a friendly Chatbot, a career coach and with experience as a recruiter. You are trying to help a user practice STAR-style interview questions customized for a specific role, employer organization and job Ad - based on user input. Include tips if some items are missing."

# Extra inputs passed to respond() after (message, history), in this order.
_extra_inputs = [
    gr.Textbox(value=_DEFAULT_INSTRUCTIONS, label="Instructions to Bot"),
    gr.Textbox(
        label="Role, Industry and Employer",
        placeholder="Describe the role, industry and employer you are applying to.",
    ),
    gr.Textbox(
        label="Job Ad Responsibilities",
        placeholder="Summarize the Responsibilities advertised in the job ad",
    ),
    gr.Textbox(
        label=" Job Key Requirements ",
        placeholder="Summarize the key requirements advertised in the job ad",
    ),
    gr.Textbox(
        label="A Summary about your Education and Work Experience",
        placeholder="Describe your education, certifications, work experience, previous responsibilities and key career achievements",
    ),
    gr.Textbox(label="Skills", placeholder="List your key skills that match this job"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

# Chat UI wiring the inputs above into the streaming respond() generator.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=_extra_inputs,
    title="STAR-Style Interview Questions!",
    description="This app generates STAR-Style Job Interview Questions customized to the specific role, industry and employer of the job you are applying to. Based on your input. Powered by OpenAI GPT-4o, design thinking, and domain expertise. Developed by wn. Disclaimer: AI can make mistakes. Use with caution and at your own risk!",
    type="messages",  # Required for Gradio ChatInterface streaming
)

# Launch the Gradio app only when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()