Spaces:
Sleeping
Sleeping
| from app.state.state import OnboardingState | |
| from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage, AIMessage | |
| from app.prompts.resume_agent_prompt import resume_agent_prompt | |
| from app.prompts.jd_agent_prompt import jd_agent_prompt | |
| from app.prompts.roadmap_planner_agent_prompt import roadmap_planner_agent_prompt | |
| from app.agents.agents import resume_agent, jd_agent, roadmap_planner_agent, gap_analysis_agent, roadmap_planner_agent_tools | |
| from app.prompts.gap_analysis_agent_prompt import gap_analysis_agent_prompt | |
| import json | |
| import logging | |
| from app.tools.tools import * | |
| from langchain_community.document_loaders import PyMuPDFLoader | |
| from langgraph.prebuilt import ToolNode, tools_condition | |
| from app.schemas.jd_extract_schema import JobDescriptionExtract | |
| from app.schemas.resume_extract_schema import ResumeExtract | |
| from app.schemas.skill_gap_analysis_schema import SkillGapAnalysis | |
| logger = logging.getLogger(__name__) | |
def input_node(state: OnboardingState):
    """Read the resume PDF referenced by ``file_path`` and put its text into state.

    Returns a partial state update: on success ``resume_text`` (all pages joined
    with newlines) and a cleared ``extraction_error``; on failure only
    ``extraction_error`` with a human-readable message.
    """
    path = state.get("file_path")
    if not path:
        # Nothing to load — surface the problem instead of raising.
        return {"extraction_error": "Missing file_path in state"}
    try:
        pages = PyMuPDFLoader(path).load()
        return {
            "resume_text": "\n".join(page.page_content for page in pages),
            "extraction_error": None,
        }
    except Exception as e:
        logger.error(f"Failed to load resume: {str(e)}")
        return {"extraction_error": f"Failed to load resume: {str(e)}"}
def extractResumeDataNode(state: OnboardingState):
    """Extract structured resume data using resume agent.

    Fixes two robustness gaps in the original:
    - ``state["resume_text"]`` raised KeyError when input_node failed (it sets
      only ``extraction_error``); we now read it with ``.get`` and bail out on
      empty text.
    - The agent call was unguarded, unlike skill_gap_node. On any failure we
      return an empty ResumeExtract schema (the pattern the sibling nodes use
      with SkillGapAnalysis / JobDescriptionExtract) so downstream consumers
      can still call ``.model_dump()``.
    """
    resume_text = state.get("resume_text") or ""
    if not resume_text.strip():
        logger.warning("resume_text is missing from state")
        return {"resume_data": ResumeExtract()}
    messages = [
        SystemMessage(content=resume_agent_prompt),
        HumanMessage(content=f"<resume_text>{resume_text}</resume_text>")
    ]
    try:
        result = resume_agent.invoke(messages)
        # Structured-output agents may return either a dict with a "parsed"
        # key or the parsed model directly (see the commented JD node).
        parsed = result.get("parsed") if isinstance(result, dict) else result
        return {"resume_data": parsed}
    except Exception as e:
        logger.error(f"Resume extraction failed: {str(e)}")
        return {"resume_data": ResumeExtract()}
# TODO(review): extractJDDataNode is dead (commented-out) code — re-enable it once JD extraction is wired into the graph, or delete it; keeping it as reference for the dict-vs-model "parsed" handling.
# def extractJDDataNode(state: OnboardingState):
| # """Extract structured job description data using JD agent.""" | |
| # jd_text = state.get("job_description", "") | |
| # if not jd_text or len(jd_text.strip()) < 5: | |
| # logger.warning("job_description text is missing from state") | |
| # return {"JobDescriptionExtract_data": JobDescriptionExtract()} | |
| # logger.info(f"Extracting JD from {len(jd_text)} characters") | |
| # messages = [ | |
| # SystemMessage(content=jd_agent_prompt), | |
| # HumanMessage(content=f"<job_description>{jd_text}</job_description>") | |
| # ] | |
| # try: | |
| # result = jd_agent.invoke(messages) | |
| # parsed_data = result.get("parsed") if isinstance(result, dict) else result | |
| # if parsed_data.job_title is None and parsed_data.tools_technologies is None: | |
| # logger.warning("JD extraction returned empty schema") | |
| # else: | |
| # logger.info(f"Successfully extracted job title: {parsed_data.job_title}") | |
| # return {"JobDescriptionExtract_data": parsed_data} | |
| # except Exception as e: | |
| # logger.error(f"JD extraction failed: {str(e)}") | |
| # return {"JobDescriptionExtract_data": JobDescriptionExtract()} | |
def skill_gap_node(state: OnboardingState):
    """Compare the extracted resume against the job description.

    Returns ``skill_gap_analysis_data``: the agent's parsed analysis, or an
    empty SkillGapAnalysis schema if the agent call fails.
    """
    resume_extract = state["resume_data"]
    name = state.get("candidate_name", "Candidate")
    jd = state.get("job_description", "")
    # Serialize the resume without None-valued fields to keep the prompt lean.
    resume_json = json.dumps(resume_extract.model_dump(exclude_none=True), indent=2)
    prompt_text = f"""Analyze the skill gaps for the following candidate:
Candidate Name: {name}
Resume:
{resume_json}
Job Description:
{jd}
Please provide a detailed skill gap analysis."""
    try:
        outcome = gap_analysis_agent.invoke([
            SystemMessage(content=gap_analysis_agent_prompt),
            HumanMessage(content=prompt_text),
        ])
        return {"skill_gap_analysis_data": outcome["parsed"]}
    except Exception as e:
        logger.error(f"Skill gap analysis failed: {str(e)}")
        # Empty schema keeps downstream nodes functional on failure.
        return {"skill_gap_analysis_data": SkillGapAnalysis()}
def roadmap_planning_node(state: OnboardingState):
    """
    Plan learning roadmap based on skill gaps.
    This node decides which tools to call next based on the analysis.
    """
    gap_json = json.dumps(state["skill_gap_analysis_data"].model_dump(), indent=2)
    # System prompt + tagged analysis first, then any accumulated messages
    # (e.g. ToolMessages from previous tool rounds).
    conversation = [
        SystemMessage(content=roadmap_planner_agent_prompt),
        HumanMessage(content=f"<skill_gap_analysis>\n{gap_json}\n</skill_gap_analysis>"),
        *state.get("messages", []),
    ]
    try:
        reply = roadmap_planner_agent.invoke(conversation)
    except Exception as e:
        logger.error(f"Roadmap planning failed: {str(e)}")
        return {"messages": [AIMessage(content=f"Error in roadmap planning: {str(e)}")]}
    return {"messages": [reply]}
# Initialize tool node for roadmap planner
# Executes the tool calls emitted by roadmap_planner_agent's AIMessages;
# NOTE(review): presumably routed to from roadmap_planning_node via the
# imported tools_condition — confirm in the graph wiring (not visible here).
tool_node = ToolNode(roadmap_planner_agent_tools)