Spaces:
Sleeping
Sleeping
# Standard library
import os  # kept: the Groq API key is expected via the environment (HF Secret)

from typing import TypedDict

# Third-party
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph

# NOTE(review): ChatGroq reads GROQ_API_KEY from the environment — load it
# from a Hugging Face Secret before this module is imported.
# temperature=0 for deterministic classification output.
llm = ChatGroq(temperature=0, model="llama3-70b-8192")
class ResumeState(TypedDict):
    """Shared pipeline state passed between the LangGraph nodes."""
    resume_text: str  # raw resume text supplied by the caller
    jd_text: str      # raw job-description text supplied by the caller
    summary: str      # written by summarize_node
    result: str       # "Suitable" / "Not Suitable" / "Unclear" — written by jd_match_node
    reasoning: str    # raw LLM explanation — written by jd_match_node
def summarize_node(state: ResumeState) -> ResumeState:
    """Summarize the resume and store the result under ``summary``.

    Parameters:
        state: current pipeline state; only ``resume_text`` is read.

    Returns:
        A new state dict with ``summary`` filled in. Returning a merged
        copy instead of mutating ``state`` in place avoids silently
        modifying the caller's dict (LangGraph merges the returned
        mapping into its own state either way).
    """
    prompt = PromptTemplate.from_template("Summarize this resume:\n{resume}")
    summary = llm.invoke(prompt.format(resume=state["resume_text"])).content
    return {**state, "summary": summary}
def jd_match_node(state: ResumeState) -> ResumeState:
    """Classify the candidate against the job description.

    Parameters:
        state: current pipeline state; reads ``resume_text`` and ``jd_text``.

    Returns:
        A new state dict (no in-place mutation of the caller's ``state``)
        with ``result`` set to "Suitable", "Not Suitable", or "Unclear"
        (when the model answer contains neither phrase) and ``reasoning``
        set to the raw model response.

    NOTE(review): this node re-sends the full ``resume_text`` and never
    reads the ``summary`` produced by summarize_node — confirm whether
    the summary was meant to be used here.
    """
    prompt = PromptTemplate.from_template(
        "Given the resume and job description, classify the candidate as Suitable or Not Suitable. Explain your reasoning.\n\nResume:\n{resume}\n\nJob Description:\n{jd}"
    )
    response = llm.invoke(
        prompt.format(resume=state["resume_text"], jd=state["jd_text"])
    ).content.strip()

    lowered = response.lower()  # lowercase once instead of per-branch
    # Order matters: "not suitable" contains "suitable" as a substring,
    # so the negative label must be tested first.
    if "not suitable" in lowered:
        result = "Not Suitable"
    elif "suitable" in lowered:
        result = "Suitable"
    else:
        result = "Unclear"
    return {**state, "result": result, "reasoning": response}
def build_graph():
    """Assemble and compile the linear summarize -> jd_match pipeline.

    Returns the compiled LangGraph app; invoke it with a ``ResumeState``
    containing ``resume_text`` and ``jd_text``.
    """
    workflow = StateGraph(ResumeState)
    for node_name, node_fn in (
        ("summarize", summarize_node),
        ("jd_match", jd_match_node),
    ):
        workflow.add_node(node_name, node_fn)
    workflow.set_entry_point("summarize")
    workflow.add_edge("summarize", "jd_match")
    workflow.set_finish_point("jd_match")
    return workflow.compile()