# GenAI_Career_Assistant/rag/analysis_chain.py
import os
from huggingface_hub import InferenceClient
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
# =====================================================
# 1. INITIALIZATION SECTION (run ONCE)
# =====================================================
HF_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not HF_API_TOKEN:
    raise RuntimeError("Set the HUGGINGFACE_API_TOKEN environment variable.")
MODEL_NAME = ""  # Insert the Hugging Face model ID to use here before running
# Initialize remote LLM client ONCE
client = InferenceClient(
    model=MODEL_NAME,
    token=HF_API_TOKEN,
)
# LLM wrapper: turns a plain prompt string into a single chat completion
def hf_llm(prompt: str) -> str:
    """Send a prompt to the remote model and return the generated text."""
    response = client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=400,
        temperature=0.3,
    )
    return response.choices[0].message["content"]
# -----------------------------
# Vectorstore initialization
# -----------------------------
def init_vectorstore():
    """Load the pre-built FAISS index from <project_root>/data/vectorstores."""
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    vector_store_path = os.path.join(base_dir, "data", "vectorstores")
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectorstore = FAISS.load_local(
        folder_path=vector_store_path,
        embeddings=embeddings,
        allow_dangerous_deserialization=True,
    )
    return vectorstore
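# The loader above assumes a FAISS index has already been built under
# data/vectorstores. A minimal sketch of how such an index could be created
# (hypothetical helper; document loading and chunking are assumed to happen
# elsewhere and are not part of this module):
def build_vectorstore(resume_texts, save_path="data/vectorstores"):
    """Build and persist a FAISS index from raw resume text chunks (sketch only)."""
    from langchain_core.documents import Document  # local import: only needed for this sketch

    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    docs = [Document(page_content=text) for text in resume_texts]
    store = FAISS.from_documents(docs, embeddings)
    store.save_local(save_path)
    return store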
# Load the index once at import time and expose a shared retriever
vectorstore = init_vectorstore()
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
# =====================================================
# 2. BUILD RAG ANALYSIS CHAIN (no re-init)
# =====================================================
def build_analysis_chain(retriever, llm_callable):
    """
    retriever    -> an already-initialized FAISS retriever
    llm_callable -> function that takes a prompt string and returns a string
    """
    prompt = ChatPromptTemplate.from_template(
        """
        You are a professional Resume Analyst AI.
        Analyze the resume content against the job description and respond using exactly this structure:
        {{
          "job_fit_score": 0-100,
          "fit_summary": "<3 sentence summary>",
          "strengths": ["..."],
          "missing_skills": ["..."],
          "recommendations": ["..."]
        }}
        RETRIEVED RESUME CONTENT:
        {context}
        JOB DESCRIPTION:
        {job_description}
        Analyze and return JSON only.
        """
    )
    chain = (
        {
            "context": retriever,
            "job_description": RunnablePassthrough(),
        }
        | prompt
        # Convert the formatted ChatPromptValue to a plain string for the raw LLM callable
        | RunnableLambda(lambda chat_prompt_value: llm_callable(chat_prompt_value.to_string()))
        | StrOutputParser()
    )
    return chain
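# The chain returns the model's raw text, which the prompt asks to be JSON.
# A minimal sketch of how a caller might parse that reply (hypothetical helper;
# the original module returns the raw string and leaves parsing to the caller):
def parse_analysis(raw_output: str) -> dict:
    """Parse the model's JSON reply, falling back to the raw text on failure (sketch only)."""
    import json  # local import: only needed for this sketch

    try:
        return json.loads(raw_output)
    except json.JSONDecodeError:
        return {"fit_summary": raw_output}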
# =====================================================
# 3. MAIN FUNCTION FOR ANALYSIS
# =====================================================
def analyze_resume_against_job(job_description: str, retriever, llm_callable):
    """Run the RAG analysis chain for one job description and return the raw LLM output."""
    chain = build_analysis_chain(retriever, llm_callable)
    return chain.invoke(job_description)
__all__ = [
"retriever",
"vectorstore",
"hf_llm",
"analyze_resume_against_job",
"build_analysis_chain"
]
# =====================================================
# 4. Example test run
# =====================================================
if __name__ == "__main__":
    # Quick smoke test against the locally loaded index
    job_desc = "What is the user's machine learning experience?"
    result = analyze_resume_against_job(
        job_description=job_desc,
        retriever=retriever,
        llm_callable=hf_llm,
    )
    print("=== ANALYSIS ===")
    print(result)