QuentinL52 committed on
Commit
bd3b76c
·
verified ·
1 Parent(s): 67ce3bc

Update src/interview_simulator/entretient_version_prod.py

Browse files
src/interview_simulator/entretient_version_prod.py CHANGED
@@ -4,16 +4,12 @@ import json
4
  from typing import Dict, List, Any, Annotated
5
  from typing_extensions import TypedDict
6
 
7
- from langchain_core.messages import AIMessage, SystemMessage, HumanMessage, ToolMessage
8
- from langchain_groq import ChatGroq
9
  from langgraph.graph import StateGraph, START, END
10
  from langgraph.graph.message import add_messages
11
- from langgraph.prebuilt import ToolNode
12
  from langchain_openai import ChatOpenAI
13
 
14
  from src.config import read_system_prompt, format_cv
15
- from src.crew.crew_pool import interview_analyser
16
-
17
 
18
  class State(TypedDict):
19
  messages: Annotated[list, add_messages]
@@ -28,9 +24,7 @@ class InterviewProcessor:
28
  self.job_offer = job_offer
29
  self.cv_data = cv_document['candidat']
30
  self.conversation_history = conversation_history
31
- self.tools = [interview_analyser]
32
  self.llm = self._get_llm()
33
- self.llm_with_tools = self.llm.bind_tools(self.tools)
34
 
35
  self.system_prompt_template = self._load_prompt_template()
36
  self.graph = self._build_graph()
@@ -38,59 +32,41 @@ class InterviewProcessor:
38
  def _get_llm(self) -> ChatOpenAI:
39
  openai_api_key = os.getenv("OPENAI_API_KEY")
40
  return ChatOpenAI(
41
- temperature=0.6,
42
- model_name="gpt-4o-mini",
43
- api_key=openai_api_key
44
- )
45
 
46
  def _load_prompt_template(self) -> str:
47
- return read_system_prompt('prompts/rag_prompt_old.txt')
48
 
49
  def _chatbot_node(self, state: State) -> dict:
50
- if state["messages"] and isinstance(state["messages"][-1], ToolMessage):
51
- tool_message = state["messages"][-1]
52
- return {"messages": [AIMessage(content=tool_message.content)]}
53
  messages = state["messages"]
54
  formatted_cv_str = format_cv(self.cv_data)
55
 
56
- mission = self.job_offer.get('mission', 'Non spécifiée')
57
- profil_recherche = self.job_offer.get('profil_recherche', 'Non spécifié')
58
- competences = self.job_offer.get('competences', 'Non spécifiées')
59
- pole = self.job_offer.get('pole', 'Non spécifié')
60
  system_prompt = self.system_prompt_template.format(
61
  entreprise=self.job_offer.get('entreprise', 'notre entreprise'),
62
  poste=self.job_offer.get('poste', 'ce poste'),
63
- mission=mission,
64
- profil_recherche=profil_recherche,
65
- competences=competences,
66
- pole=pole,
67
  cv=formatted_cv_str
68
  )
 
69
  llm_messages = [SystemMessage(content=system_prompt)] + messages
70
- response = self.llm_with_tools.invoke(llm_messages)
71
  return {"messages": [response]}
72
 
73
- def _route_after_chatbot(self, state: State) -> str:
74
- last_message = state["messages"][-1]
75
- if last_message.tool_calls:
76
- return "call_tool"
77
- return END
78
-
79
  def _build_graph(self) -> any:
80
  graph_builder = StateGraph(State)
81
 
82
  graph_builder.add_node("chatbot", self._chatbot_node)
83
- graph_builder.add_node("call_tool", ToolNode(self.tools))
84
- graph_builder.add_edge(START, "chatbot")
85
- graph_builder.add_conditional_edges(
86
- "chatbot",
87
- self._route_after_chatbot,
88
- {
89
- "call_tool": "call_tool",
90
- END: END
91
- }
92
- )
93
- graph_builder.add_edge("call_tool", "chatbot")
94
  return graph_builder.compile()
95
 
96
  def run(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
 
4
  from typing import Dict, List, Any, Annotated
5
  from typing_extensions import TypedDict
6
 
7
+ from langchain_core.messages import AIMessage, SystemMessage, HumanMessage
 
8
  from langgraph.graph import StateGraph, START, END
9
  from langgraph.graph.message import add_messages
 
10
  from langchain_openai import ChatOpenAI
11
 
12
  from src.config import read_system_prompt, format_cv
 
 
13
 
14
  class State(TypedDict):
15
  messages: Annotated[list, add_messages]
 
24
  self.job_offer = job_offer
25
  self.cv_data = cv_document['candidat']
26
  self.conversation_history = conversation_history
 
27
  self.llm = self._get_llm()
 
28
 
29
  self.system_prompt_template = self._load_prompt_template()
30
  self.graph = self._build_graph()
 
32
  def _get_llm(self) -> ChatOpenAI:
33
  openai_api_key = os.getenv("OPENAI_API_KEY")
34
  return ChatOpenAI(
35
+ temperature=0.6,
36
+ model_name="gpt-4o-mini",
37
+ api_key=openai_api_key
38
+ )
39
 
40
  def _load_prompt_template(self) -> str:
41
+ return read_system_prompt('prompts/rag_prompt_old.txt')
42
 
43
  def _chatbot_node(self, state: State) -> dict:
 
 
 
44
  messages = state["messages"]
45
  formatted_cv_str = format_cv(self.cv_data)
46
 
47
+ # Formatage du prompt système
 
 
 
48
  system_prompt = self.system_prompt_template.format(
49
  entreprise=self.job_offer.get('entreprise', 'notre entreprise'),
50
  poste=self.job_offer.get('poste', 'ce poste'),
51
+ mission=self.job_offer.get('mission', 'Non spécifiée'),
52
+ profil_recherche=self.job_offer.get('profil_recherche', 'Non spécifié'),
53
+ competences=self.job_offer.get('competences', 'Non spécifiées'),
54
+ pole=self.job_offer.get('pole', 'Non spécifié'),
55
  cv=formatted_cv_str
56
  )
57
+
58
  llm_messages = [SystemMessage(content=system_prompt)] + messages
59
+ response = self.llm.invoke(llm_messages)
60
  return {"messages": [response]}
61
 
 
 
 
 
 
 
62
  def _build_graph(self) -> any:
63
  graph_builder = StateGraph(State)
64
 
65
  graph_builder.add_node("chatbot", self._chatbot_node)
66
+ graph_builder.add_edge(START, "chatbot")
67
+ # Le graph se termine toujours après la réponse du chatbot
68
+ graph_builder.add_edge("chatbot", END)
69
+
 
 
 
 
 
 
 
70
  return graph_builder.compile()
71
 
72
  def run(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]: