File size: 4,739 Bytes
435ca27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
import os
import sys
import gradio as gr
from huggingface_hub import InferenceClient

# Make the parent directory importable so the local `rag` package resolves
# when this script is run directly from its own folder.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from rag.logger import get_logger
from rag.analysis_chain import retriever, hf_llm, analyze_resume_against_job

# Module-level logger shared by this script.
logger = get_logger(__name__)

# -----------------------------------
# Load HuggingFace API key
# -----------------------------------
# Fail fast at import time: nothing below works without API credentials.
HF_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not HF_API_TOKEN:
    raise RuntimeError("Environment variable HUGGINGFACE_API_TOKEN is missing!")

# Shared Hugging Face inference client used for all chat completions.
# NOTE(review): `model` is empty — chat_completion calls will fail until a
# model id is supplied here; confirm the intended deployment model.
client = InferenceClient(
    token=HF_API_TOKEN,
    model=""       # When in use insert model name as parameter here
)

# -----------------------------------
# System Prompt
# -----------------------------------
# Base persona prompt used as the system preamble for every conversation
# turn (prepended to the RAG output or the raw user message in bot_response).
# Fix: "talent aquisition" -> "talent acquisition" (typo sent to the LLM).
DEFAULT_SYSTEM_MESSAGE = """

You are a helpful resume-analysis chatbot.



You can perform the following tasks on the data you have:

1. Job description analysis using the RAG pipeline.

2. Candidate summarization using the vectorstore *WHICH YOU ALREADY HAVE*.

3. General conversation.



Always respond clearly and professionally as if you were a talent acquisition specialist.

"""

# -----------------------------------
# INTENT DETECTOR
# -----------------------------------
def detect_intent(user_message: str):
    """Classify a user message into one of three chat intents.

    Performs a simple case-insensitive substring scan against fixed keyword
    lists. Intents are checked in priority order — job-description cues win
    over candidate-summary cues — and anything that matches neither falls
    back to general chat.

    Args:
        user_message: Raw text typed by the user.

    Returns:
        One of "job_analysis", "candidate_info", or "general".
    """
    text = user_message.lower()

    # Ordered intent table: first matching bucket wins, so job-description
    # phrasing takes precedence over candidate-profile phrasing.
    intent_keywords = {
        "job_analysis": (
            "responsibilities", "requirements", "we are looking for",
            "qualifications", "role description", "job description",
            "candidate must", "skills required", "apply", "position",
            "looking for a", "experience required",
        ),
        "candidate_info": (
            "candidate", "tell me about him", "tell me about her", "profile summary",
            "summary", "skills", "experience", "background", "what can he do",
            "what is his experience", "what is his background", "about the candidate", "about his resume",
        ),
    }

    for intent, keywords in intent_keywords.items():
        if any(keyword in text for keyword in keywords):
            return intent

    # Nothing matched: treat as ordinary small talk.
    return "general"


# -----------------------------------
# BOT RESPONSE
# -----------------------------------
def bot_response(message, history):
    """Generate a streamed reply for the Gradio chat interface.

    Routes the incoming message by rule-based intent: job descriptions go
    through the RAG analysis pipeline, candidate questions are answered from
    retrieved resume chunks, and everything else falls back to plain chat.
    Yields the cumulative response text once per streamed token, as
    gr.ChatInterface expects from a generator callback.

    Args:
        message: Latest user message (str).
        history: Prior chat turns supplied by gr.ChatInterface (unused here;
            each reply is built from the current message only).

    Yields:
        str: The response accumulated so far, growing with each token.
    """
    system_msg = DEFAULT_SYSTEM_MESSAGE
    # Fixed generation settings for every intent.
    max_tokens = 500
    temperature = 0.7
    top_p = 0.95
    intent = detect_intent(message)

    # -----------------------------------
    # INTENT 1 → JOB ANALYSIS USING RAG
    # -----------------------------------
    if intent == "job_analysis":
        # The RAG chain compares the stored resume against the pasted job
        # description; its text output is then re-sent to the LLM below.
        rag_output = analyze_resume_against_job(
            job_description=message,
            retriever=retriever,
            llm_callable=hf_llm
        )
        prompt = f"{system_msg}\n\n{rag_output}"

    # -----------------------------------
    # INTENT 2 → CANDIDATE SUMMARY
    # -----------------------------------
    elif intent == "candidate_info":
        # Use LCEL retriever interface (correct for VectorStoreRetriever)
        # A fixed query pulls a broad slice of the resume rather than
        # searching on the user's exact wording.
        retrieved_docs = retriever.invoke("candidate overall profile")
        combined = "\n".join([doc.page_content for doc in retrieved_docs])

        # NOTE(review): this branch does not include system_msg in the
        # prompt — presumably intentional, since the template below carries
        # its own persona; confirm.
        prompt = f"""

You are a professional candidate summarization assistant.



Using the resume data below, create a detailed profile summary.



Resume Data:

{combined}



Provide:

- background

- key experiences

- technical + soft skills

- strengths

- ideal job roles

"""

    # -----------------------------------
    # INTENT 3 → GENERAL CHAT
    # -----------------------------------
    else:
        prompt = f"{system_msg}\nUser: {message}"

    # -----------------------------------
    # STREAMING HF LLM OUTPUT
    # -----------------------------------
    # The whole prompt is sent as a single user-role message (no separate
    # system role). Each chunk's delta is appended and the running total is
    # yielded so the UI renders incrementally.
    response = ""
    for chunk in client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


# -----------------------------------
# UI: ChatGPT-style interface
# -----------------------------------
# ChatGPT-style chat widget wired to the streaming bot_response generator.
chatbot = gr.ChatInterface(
    fn=bot_response,
    title="GenAI Career Agent"
)


# -----------------------------------
# Layout (NO LOGIN)
# -----------------------------------
# Wrap the chat widget in a Blocks layout with header text.
# NOTE(review): `demo` is defined but never launched in this file —
# presumably launched by the hosting platform or another entry point; verify.
with gr.Blocks() as demo:
    gr.Markdown("## Resume Analyst RAG Chatbot")
    gr.Markdown("Uses FAISS + HuggingFace LLM + custom RAG analysis pipeline.")
    chatbot.render()