|
|
import os
|
|
|
import sys
|
|
|
import gradio as gr
|
|
|
from huggingface_hub import InferenceClient
|
|
|
|
|
|
# Make the repository root importable so the `rag` package resolves when this
# script is run directly from its own subdirectory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from rag.logger import get_logger

from rag.analysis_chain import retriever, hf_llm, analyze_resume_against_job

# Module-level logger, named after this module (configured by rag.logger).
logger = get_logger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face Inference API credentials. Fail fast at import time so a
# misconfigured deployment is obvious immediately, not at the first request.
HF_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")

if not HF_API_TOKEN:
    raise RuntimeError("Environment variable HUGGINGFACE_API_TOKEN is missing!")

# Chat-completion client. The model id was hard-coded to "" (an empty model id
# makes every chat_completion call fail); read it from the environment instead,
# keeping "" as the fallback so existing behavior is unchanged when the
# variable is unset. NOTE(review): confirm the intended default model id.
client = InferenceClient(
    token=HF_API_TOKEN,
    model=os.getenv("HF_CHAT_MODEL", ""),
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# System prompt prepended to every LLM request; tells the model what tasks it
# supports and what tone to use. Fixes the "aquisition" typo so the model is
# not primed with a misspelling it may echo back to users.
DEFAULT_SYSTEM_MESSAGE = """
You are a helpful resume-analysis chatbot.

You can perform the following tasks on the data you have:
1. Job description analysis using the RAG pipeline.
2. Candidate summarization using the vectorstore *WHICH YOU ALREADY HAVE*.
3. General conversation.

Always respond clearly and professionally as if you were a talent acquisition specialist.
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def detect_intent(user_message: str):
    """Classify a message as 'job_analysis', 'candidate_info', or 'general'.

    Purely rule-based: the message is lowercased and checked against keyword
    groups with case-insensitive substring matching. Job-description cues are
    tested before candidate-profile cues (so e.g. "skills required" routes to
    job analysis even though "skills" alone is a candidate cue); anything with
    no match falls through to 'general'.
    """
    lowered = user_message.lower()

    # Ordered rule table: first group with any hit decides the intent.
    intent_rules = (
        ("job_analysis", (
            "responsibilities", "requirements", "we are looking for",
            "qualifications", "role description", "job description",
            "candidate must", "skills required", "apply", "position",
            "looking for a", "experience required",
        )),
        ("candidate_info", (
            "candidate", "tell me about him", "tell me about her", "profile summary",
            "summary", "skills", "experience", "background", "what can he do",
            "what is his experience", "what is his background", "about the candidate", "about his resume",
        )),
    )

    for intent, keywords in intent_rules:
        if any(keyword in lowered for keyword in keywords):
            return intent

    return "general"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def bot_response(message, history):
    """Stream a reply for *message*, routing it by detected intent.

    Intents (see ``detect_intent``):
      - ``job_analysis``   -> run the RAG pipeline on the job description.
      - ``candidate_info`` -> retrieve resume chunks and ask for a summary.
      - ``general``        -> plain chat with the default system message.

    Yields the accumulated response text after each streamed token so the
    Gradio UI re-renders the growing reply.
    """
    system_msg = DEFAULT_SYSTEM_MESSAGE
    max_tokens = 500
    temperature = 0.7
    top_p = 0.95
    intent = detect_intent(message)

    if intent == "job_analysis":
        # Full RAG analysis of the incoming job description against the resume.
        rag_output = analyze_resume_against_job(
            job_description=message,
            retriever=retriever,
            llm_callable=hf_llm,
        )
        prompt = f"{system_msg}\n\n{rag_output}"

    elif intent == "candidate_info":
        # Pull the candidate's resume chunks from the vectorstore and ask the
        # LLM for a structured profile summary.
        retrieved_docs = retriever.invoke("candidate overall profile")
        combined = "\n".join(doc.page_content for doc in retrieved_docs)

        prompt = f"""
You are a professional candidate summarization assistant.

Using the resume data below, create a detailed profile summary.

Resume Data:
{combined}

Provide:
- background
- key experiences
- technical + soft skills
- strengths
- ideal job roles
"""

    else:
        prompt = f"{system_msg}\nUser: {message}"

    # Bug fix: the original ignored `history`, so every turn was answered with
    # zero conversational context. Replay prior turns ahead of the current
    # prompt. Gradio supplies history either as [user, assistant] pairs or as
    # OpenAI-style message dicts depending on version -- handle both.
    messages = []
    for turn in history or []:
        if isinstance(turn, dict):
            messages.append({"role": turn.get("role", "user"),
                             "content": turn.get("content", "")})
        else:
            user_turn, assistant_turn = turn
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": prompt})

    # Stream tokens as they arrive, yielding the running concatenation.
    response = ""
    for chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Streaming chat UI. `bot_response` is a generator, so Gradio renders the
# reply incrementally as each partial string is yielded.
chatbot = gr.ChatInterface(
    fn=bot_response,
    title="GenAI Career Agent"
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Page layout: two header lines plus the chat interface defined above.
# `demo` is the top-level app object that `gradio <file>` / HF Spaces detect.
with gr.Blocks() as demo:
    gr.Markdown("## Resume Analyst RAG Chatbot")
    gr.Markdown("Uses FAISS + HuggingFace LLM + custom RAG analysis pipeline.")
    chatbot.render()

# Bug fix: running `python <this file>` previously built the UI but never
# served it. The `gradio` CLI and HF Spaces auto-launch `demo`, so guarding
# the explicit launch behind __main__ stays compatible with both.
if __name__ == "__main__":
    demo.launch()