"""
State definitions for the Job Writer LangGraph Workflow.
"""
from enum import StrEnum
from typing import Annotated
from typing_extensions import List, Dict, Any
from langgraph.graph import MessagesState
from dataclasses import dataclass
def merge_dict_reducer(
x: Dict[str, Any] | None, y: Dict[str, Any] | None
) -> Dict[str, Any]:
"""
Reducer function to merge two dictionaries.
Used for company_research_data to allow parallel nodes to update it.
Args:
x: First dictionary (existing state or None)
y: Second dictionary (new update or None)
Returns:
Merged dictionary with y taking precedence for overlapping keys
"""
# Handle None cases - treat as empty dict
if x is None:
x = {}
if y is None:
y = {}
# Merge dictionaries, with y taking precedence for overlapping keys
return {**x, **y}
class AppState(MessagesState):
    """
    Top-level input state for the job application writer workflow.

    NOTE: the previous ``@dataclass`` decorator was removed — MessagesState
    is a TypedDict, and wrapping a TypedDict subclass in ``@dataclass``
    mixes two incompatible class machineries. Every other state class in
    this module is a plain MessagesState subclass; this now matches them.

    Attributes:
        resume_path: Location of the candidate's resume (presumably a file
            path — confirm against the loader node)
        job_description_source: Source of the job description (path or URL)
        content: Type of application material to generate
        current_node: Name of the node currently executing
    """
    resume_path: str
    job_description_source: str
    content: str  # "cover_letter", "bullets", "linkedin_note"
    current_node: str
class DataLoadState(MessagesState, total=False):
    """
    State container for the data-loading phase of the workflow.

    Declared with ``total=False`` so every field is optional and nodes
    can populate the state incrementally.

    Attributes:
        resume_path: Location of the candidate's resume (presumably a file
            path — confirm against the loader node)
        job_description_source: Source of the job description (path or URL)
        content_category: Type of application material to generate
        resume: Loaded resume text
        job_description: Loaded job description text
        company_name: Extracted company name
        current_node: Name of the node currently executing
        next_node: Next node to route to after the data-loading subgraph
        company_research_data: Company research results; merged via
            merge_dict_reducer so parallel nodes can update it
        draft: Current draft of the application material
        feedback: Human feedback on the draft
        critique_feedback: Automated critique feedback
        output_data: Final output data
    """
    resume_path: str
    job_description_source: str
    content_category: str  # "cover_letter", "bullets", "linkedin_note"
    resume: str
    job_description: str
    company_name: str
    current_node: str
    next_node: str  # For routing after data loading subgraph
    # Use Annotated with reducer to allow parallel nodes to merge dictionary updates
    company_research_data: Annotated[Dict[str, Any], merge_dict_reducer]
    # Result fields (added for final output - optional, populated later)
    draft: str
    feedback: str
    critique_feedback: str
    output_data: str
class ResearchState(MessagesState):
    """
    State container for the company-research subgraph.

    Attributes:
        company_research_data: Research results about the company
        attempted_search_queries: Search queries already attempted
            (presumably derived from the job description — confirm
            against the research nodes)
        current_node: Name of the node currently executing
        content_category: Type of application material to generate
    """
    company_research_data: Dict[str, Any]
    attempted_search_queries: List[str]
    current_node: str
    content_category: str
class ResultState(MessagesState):
    """
    State container for the drafting/finalization phase of the workflow.

    Attributes:
        draft: Current draft of the application material
        feedback: Human feedback on the draft
        critique_feedback: Automated critique feedback
        current_node: Name of the node currently executing
        company_research_data: Company research results carried forward
        output_data: Final output data
    """
    draft: str
    feedback: str
    critique_feedback: str
    current_node: str
    company_research_data: Dict[str, Any]
    output_data: str
class NodeName(StrEnum):
    """Node names for the job application workflow graph.

    Members are StrEnum values, so each compares equal to its string
    value and can be passed directly where a node-name string is expected.
    """
    LOAD = "load"
    # Adapter node that converts DataLoadState into ResearchState
    RESEARCH_SUBGRAPH_ADAPTER = "to_research_adapter"
    RESEARCH = "research"
    CREATE_DRAFT = "create_draft"
    CRITIQUE = "critique"
    HUMAN_APPROVAL = "human_approval"
    FINALIZE = "finalize"
def dataload_to_research_adapter(state: DataLoadState) -> ResearchState:
    """
    Adapter to convert DataLoadState to ResearchState.

    Extracts only the fields needed by the research subgraph, following
    the state-adapter pattern recommended by the LangGraph documentation.
    Missing fields default to empty values rather than raising.

    Parameters
    ----------
    state: DataLoadState
        Current workflow state with loaded data.

    Returns
    -------
    ResearchState
        State formatted for the research subgraph with required fields.
    """
    # NOTE(review): a stray "|" artifact trailing this function was
    # removed — it was extraction residue and a syntax error.
    return ResearchState(
        company_research_data=state.get("company_research_data", {}),
        attempted_search_queries=[],
        current_node="",
        content_category=state.get("content_category", ""),
        messages=state.get("messages", []),
    )