umarch committed on
Commit 1c71f7c · verified · 1 parent: 9bff1c9

Upload 2 files

Files changed (2)
  1. app_updated_0303.py +339 -0
  2. rag_utils_updated_0303.py +195 -0
app_updated_0303.py ADDED
@@ -0,0 +1,339 @@
+ import streamlit as st
+ import pandas as pd
+ import os
+ import logging
+ import re
+ from chromadb import PersistentClient
+ from sentence_transformers import SentenceTransformer
+ from langchain_groq import ChatGroq
+ # Import from the utils module uploaded in this commit (the original line
+ # referenced "rag_utils_updated", which does not match the uploaded filename).
+ from rag_utils_updated_0303 import extract_text, preprocess_text, get_embeddings, is_image_pdf, assess_cv, extract_job_requirements
+ import plotly.graph_objects as go
+
+ # Logging setup
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+ logger = logging.getLogger(__name__)
+
+ # Initialize session state (only the job description, caches, and flags)
+ if "job_description" not in st.session_state:
+     st.session_state.job_description = ""
+ if "continue_to_detailed_assessment" not in st.session_state:
+     st.session_state.continue_to_detailed_assessment = False
+ if "requirements" not in st.session_state:
+     st.session_state.requirements = None
+ if "detailed_assessments" not in st.session_state:
+     st.session_state.detailed_assessments = {}  # initialize as an empty dictionary
+ if "chromadb_initialized" not in st.session_state:
+     st.session_state.chromadb_initialized = False
+ if "cvs" not in st.session_state:
+     st.session_state.cvs = {}
+ if "job_description_embedding" not in st.session_state:
+     st.session_state.job_description_embedding = None
+ if "assessment_completed" not in st.session_state:
+     st.session_state.assessment_completed = False
+
+ # Persistent storage for embeddings
+ PERMANENT_DB_PATH = "./cv_db"
+ if "collection" not in st.session_state:
+     db_client = PersistentClient(path=PERMANENT_DB_PATH)
+     st.session_state.collection = db_client.get_or_create_collection("cv_embeddings")
+
+ if "embedding_model" not in st.session_state:
+     st.session_state.embedding_model = SentenceTransformer('all-mpnet-base-v2')
+
+ if "groq_client" not in st.session_state:
+     st.session_state.groq_client = ChatGroq(api_key=os.environ.get("GROQ_API_KEY"))
+
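+ # Note: the client above relies on whatever default model the installed
+ # ChatGroq version provides. A specific Groq model could be pinned instead
+ # (illustrative, not part of the original code):
+ #   ChatGroq(api_key=os.environ.get("GROQ_API_KEY"), model="llama3-70b-8192")
+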
+ st.title("CV Assessment and Ranking App")
+
+ # 1. Input job description
+ st.subheader("Enter Job Description")
+ requirements_source = st.radio("Source:", ("File Upload", "Web Page Link", "Text Input"))
+
+ job_description_text = ""
+ if requirements_source == "File Upload":
+     uploaded_file = st.file_uploader("Upload Job Requirements (PDF/DOCX)", type=["pdf", "docx"])
+     if uploaded_file:
+         job_description_text = extract_text(uploaded_file)
+ elif requirements_source == "Web Page Link":
+     # webpage_url = st.text_input("Enter Web Page URL")
+     # if webpage_url:
+     #     job_description_text = extract_text(webpage_url)
+     st.warning("This function is not available in the MVP yet.")
+ elif requirements_source == "Text Input":
+     job_description_text = st.text_area("Enter Job Requirements", height=200)
+
+ st.session_state.job_description = job_description_text
+
+ if st.session_state.job_description:
+     st.success("Job description uploaded successfully!")
+
+ # 2. Upload CVs (folder upload)
+ st.subheader("Upload CVs (Folder)")
+ uploaded_files = st.file_uploader("Choose a folder containing CV files", accept_multiple_files=True)
+
+ if uploaded_files and not st.session_state.assessment_completed:
+     st.write(f"{len(uploaded_files)} CV(s) uploaded.")
+
+     st.session_state.cvs = {}
+     cv_embeddings_created = 0
+
+     if not st.session_state.chromadb_initialized:
+         try:
+             ids_in_collection = st.session_state.collection.get()['ids']
+             if ids_in_collection:
+                 st.session_state.collection.delete(ids=ids_in_collection)
+                 logger.info("ChromaDB collection cleared.")
+             else:
+                 logger.info("ChromaDB collection is already empty. Skipping deletion.")
+         except Exception as e:
+             st.error(f"Error clearing ChromaDB collection: {e}")
+             st.stop()
+         st.session_state.chromadb_initialized = True
+
+     for uploaded_file in uploaded_files:
+         filename = uploaded_file.name
+         if filename in st.session_state.cvs:
+             continue
+
+         for attempt in range(2):
+             try:
+                 # Only run the image-PDF check on PDFs; feeding a DOCX stream
+                 # to PyPDF2 raises and would wrongly reject the file.
+                 if uploaded_file.type == "application/pdf" and is_image_pdf(uploaded_file):
+                     st.warning(f"{filename} appears to be an image-based PDF and cannot be processed.")
+                     break
+                 uploaded_file.seek(0)  # rewind the stream after the image-PDF check
+
+                 text = extract_text(uploaded_file)
+                 if not text.strip():
+                     raise ValueError("No text extracted.")
+
+                 preprocessed_text = preprocess_text(text)
+                 embedding = get_embeddings(preprocessed_text, st.session_state.embedding_model)
+
+                 st.session_state.cvs[filename] = {
+                     "text": preprocessed_text,
+                     "embedding": embedding,
+                 }
+                 cv_embeddings_created += 1
+
+                 try:
+                     st.session_state.collection.add(
+                         embeddings=[embedding],
+                         documents=[preprocessed_text],
+                         ids=[filename],
+                         metadatas=[{"filename": filename}]
+                     )
+                     logger.info(f"Embedding for {filename} added to ChromaDB.")
+                 except Exception as e:
+                     st.error(f"Error adding embedding to ChromaDB for {filename}: {e}")
+                     st.stop()
+
+                 break
+
+             except Exception as e:
+                 logger.error(f"Text extraction failed for {filename} on attempt {attempt + 1}: {e}")
+                 if attempt == 1:
+                     st.error(f"Failed to process {filename} after multiple attempts.")
+
+     if cv_embeddings_created > 0:
+         st.success(f"{cv_embeddings_created} CV embedding(s) created successfully!")
+
+     num_errors = len(uploaded_files) - cv_embeddings_created
+     if num_errors > 0:
+         st.error(f"Error in CV embedding creation for {num_errors} CV(s).")
+
+     if st.button("Continue Assessment"):
+         st.session_state.continue_to_detailed_assessment = True
+
+ elif uploaded_files and st.session_state.assessment_completed:
+     st.warning("This is an MVP. Please refresh the page before uploading and assessing new files.")
+
+ if st.session_state.continue_to_detailed_assessment:
+     st.session_state.continue_to_detailed_assessment = False  # reset the flag
+     st.write("Performing detailed assessments...")
+
+     # Extract job requirements
+     if st.session_state.job_description and st.session_state.requirements is None:
+         st.session_state.requirements = extract_job_requirements(st.session_state.job_description, st.session_state.groq_client)
+     if st.session_state.requirements:
+         with st.expander("Extracted Job Requirements:"):
+             for req in st.session_state.requirements:
+                 st.write(f"- {req}")
+     else:
+         st.warning("Could not extract job requirements.")
+
+     # Generate the job description embedding if not already done
+     if st.session_state.job_description and st.session_state.job_description_embedding is None:
+         try:
+             job_description_embedding = get_embeddings(st.session_state.job_description, st.session_state.embedding_model)
+             st.session_state.job_description_embedding = job_description_embedding
+         except Exception as e:
+             st.error(f"Error creating job description embedding: {e}")
+             st.stop()
+
+     # Detailed CV assessments
+     selected_cvs = list(st.session_state.cvs.keys())
+
+     if not st.session_state.detailed_assessments:
+         st.session_state.detailed_assessments = {}
+         with st.spinner("Performing detailed assessments..."):
+             for filename in selected_cvs:
+                 if filename in st.session_state.cvs:
+                     cv_text = st.session_state.cvs[filename]["text"]
+                     try:
+                         assessment = assess_cv(cv_text, st.session_state.requirements, filename, st.session_state.groq_client)
+                         st.session_state.detailed_assessments[filename] = assessment
+                     except Exception as e:
+                         st.error(f"Error during detailed assessment of {filename}: {e}")
+
+     # Display results (remainder of the script)
+     st.session_state.assessment_completed = True
+     st.success("Detailed assessments complete!")
+
+ st.subheader("Candidates Assessment and Ranking")
+
+ def parse_assessment(raw_response, requirements):
+     """Parse the LLM's assessment text with robust error handling."""
+     matches = {
+         "technical_lead": "Not Found",
+         "hr_specialist": "Not Found",
+         "project_manager": "Not Found",
+         "final_assessment": "Not Found",
+         "recommendation": "Not Found",
+         "technical_lead_score": "Not Found",
+         "hr_specialist_score": "Not Found",
+         "project_manager_score": "Not Found",
+         "final_assessment_score": "Not Found",
+     }
+
+     try:
+         # Parse labeled scores
+         technical_lead_match = re.search(r"Technical Lead Assessment:\s*(.*?)\s*Technical Lead Score:\s*(\d+)", raw_response, re.IGNORECASE | re.DOTALL)
+         if technical_lead_match:
+             matches["technical_lead"] = technical_lead_match.group(1).strip()
+             matches["technical_lead_score"] = technical_lead_match.group(2)
+
+         hr_specialist_match = re.search(r"HR Specialist Assessment:\s*(.*?)\s*HR Specialist Score:\s*(\d+)", raw_response, re.IGNORECASE | re.DOTALL)
+         if hr_specialist_match:
+             matches["hr_specialist"] = hr_specialist_match.group(1).strip()
+             matches["hr_specialist_score"] = hr_specialist_match.group(2)
+
+         project_manager_match = re.search(r"Project Manager Assessment:\s*(.*?)\s*Project Manager Score:\s*(\d+)", raw_response, re.IGNORECASE | re.DOTALL)
+         if project_manager_match:
+             matches["project_manager"] = project_manager_match.group(1).strip()
+             matches["project_manager_score"] = project_manager_match.group(2)
+
+         final_assessment_match = re.search(r"Final Assessment:\s*(.*?)\s*Final Assessment Score:\s*(\d+)", raw_response, re.IGNORECASE | re.DOTALL)
+         if final_assessment_match:
+             matches["final_assessment"] = final_assessment_match.group(1).strip()
+             matches["final_assessment_score"] = final_assessment_match.group(2)
+
+         recommendation_match = re.search(r"Recommendation:\s*(.*?)$", raw_response, re.IGNORECASE | re.DOTALL)
+         if recommendation_match:
+             matches["recommendation"] = recommendation_match.group(1).strip()
+
+         # Fallback: extract scores from the raw response if the labels are missing
+         if matches["technical_lead_score"] == "Not Found":
+             score_match = re.search(r"Technical Lead Assessment:.*?score(?:s)?\s*(?:of)?\s*(\d+)\s*(?:out\s*of|\/)\s*100", raw_response, re.IGNORECASE | re.DOTALL)
+             if score_match:
+                 matches["technical_lead_score"] = score_match.group(1)
+         if matches["hr_specialist_score"] == "Not Found":
+             score_match = re.search(r"HR Specialist Assessment:.*?score(?:s)?\s*(?:of)?\s*(\d+)\s*(?:out\s*of|\/)\s*100", raw_response, re.IGNORECASE | re.DOTALL)
+             if score_match:
+                 matches["hr_specialist_score"] = score_match.group(1)
+         if matches["project_manager_score"] == "Not Found":
+             score_match = re.search(r"Project Manager Assessment:.*?score(?:s)?\s*(?:of)?\s*(\d+)\s*(?:out\s*of|\/)\s*100", raw_response, re.IGNORECASE | re.DOTALL)
+             if score_match:
+                 matches["project_manager_score"] = score_match.group(1)
+         if matches["final_assessment_score"] == "Not Found":
+             score_match = re.search(r"Final Assessment:.*?(?:Consensus Score|total of|final score).*?(\d+)\s*(?:out of)?\s*100", raw_response, re.IGNORECASE | re.DOTALL)
+             if score_match:
+                 matches["final_assessment_score"] = score_match.group(1)
+
+     except Exception as e:
+         logger.error(f"Error parsing assessment: {e}")  # use the app logger instead of print
+
+     return matches
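+
+ # For reference, parse_assessment expects the labeled layout requested by the
+ # prompt in rag_utils_updated_0303.py, e.g. (illustrative, not real model output):
+ #
+ #   Technical Lead Assessment:
+ #   Strong Python and RAG background.
+ #   Technical Lead Score: 85
+ #   ...
+ #   Recommendation: Interview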
+
+ # DataFrame logic
+ if st.session_state.detailed_assessments:
+     assessments_df = pd.DataFrame(columns=["filename",
+                                            "final_assessment_score", "final_assessment",
+                                            "technical_lead_score", "technical_lead",
+                                            "hr_specialist_score", "hr_specialist",
+                                            "project_manager_score", "project_manager",
+                                            "recommendation"
+                                            ])
+     for filename, assessment in st.session_state.detailed_assessments.items():
+         if "error" in assessment:
+             st.error(assessment["error"])
+         elif "raw_response" in assessment:
+             parsed_data = parse_assessment(assessment["raw_response"], st.session_state.requirements)
+             # Append the parsed dictionary as a new row
+             assessments_df = pd.concat([assessments_df, pd.DataFrame([parsed_data])], ignore_index=True)
+             assessments_df.loc[assessments_df.index[-1], 'filename'] = filename
+
+     # Sort by 'final_assessment_score' in descending order.
+     # Convert the column to numeric first; 'coerce' turns non-numeric values into NaN.
+     assessments_df['final_assessment_score'] = pd.to_numeric(assessments_df['final_assessment_score'], errors='coerce')
+     assessments_df = assessments_df.sort_values(by='final_assessment_score', ascending=False)
+
+     st.dataframe(assessments_df)
+
+     def safe_score(value):
+         """Coerce a parsed score to int; 'Not Found' or NaN becomes 0 so the chart cannot crash."""
+         try:
+             return int(value)
+         except (TypeError, ValueError):
+             return 0
+
+     st.subheader("Detailed Assessment Results")
+     # Iterate through the DataFrame rows to render the UI for each assessment
+     for index, row in assessments_df.iterrows():
+         st.write(f"**Filename:** {row['filename']}")
+         scores = {
+             "Technical Lead": safe_score(row["technical_lead_score"]),
+             "HR Specialist": safe_score(row["hr_specialist_score"]),
+             "Project Manager": safe_score(row["project_manager_score"]),
+             "Final Assessment": safe_score(row["final_assessment_score"]),
+         }
+         scores_df = pd.DataFrame(list(scores.items()), columns=["Expert", "Score"])
+
+         # Create a Plotly bar chart with value annotations
+         fig = go.Figure(data=[go.Bar(
+             x=scores_df["Expert"],
+             y=scores_df["Score"],
+             text=scores_df["Score"],
+             textposition='auto',
+         )])
+         fig.update_layout(yaxis_range=[0, 100])
+
+         # Create the columns layout
+         col1, col2 = st.columns([1, 3])
+
+         # Bar chart in the first column
+         with col1:
+             st.plotly_chart(fig, use_container_width=True)
+
+         # Collapsed panels in the second column
+         with col2:
+             with st.expander("Technical Lead Assessment"):
+                 st.write(f"{row['technical_lead']}")
+                 st.write(f"**Technical Lead Score:** {row['technical_lead_score']}")
+
+             with st.expander("HR Specialist Assessment"):
+                 st.write(f"{row['hr_specialist']}")
+                 st.write(f"**HR Specialist Score:** {row['hr_specialist_score']}")
+
+             with st.expander("Project Manager Assessment"):
+                 st.write(f"{row['project_manager']}")
+                 st.write(f"**Project Manager Score:** {row['project_manager_score']}")
+
+             with st.expander("Final Assessment"):
+                 st.write(f"{row['final_assessment']}")
+                 st.write(f"**Final Assessment Score:** {row['final_assessment_score']}")
+
+             with st.expander("Recommendation"):
+                 st.write(f"{row['recommendation']}")
+
+         st.write("---")
+
+ else:
+     st.write("No detailed assessments were performed.")
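+
+ # To try the app locally (a sketch; assumes the dependencies above are
+ # installed and a Groq API key is available):
+ #   export GROQ_API_KEY=your_key_here
+ #   streamlit run app_updated_0303.py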
rag_utils_updated_0303.py ADDED
@@ -0,0 +1,195 @@
+ import os
+ import logging
+ import time
+ import requests
+ import json
+ import PyPDF2
+ import docx
+ from bs4 import BeautifulSoup
+ from chromadb import PersistentClient
+ from langchain_groq import ChatGroq
+ from langchain.prompts import ChatPromptTemplate
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, ValidationError
+ from typing import List
+ from sentence_transformers import SentenceTransformer
+
+ # Setup logging
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+ logger = logging.getLogger(__name__)
+
+ # --- Text Extraction ---
+ def extract_text(uploaded_file):
+     try:
+         if isinstance(uploaded_file, str):
+             return extract_text_from_webpage(uploaded_file)
+         elif hasattr(uploaded_file, 'type') and uploaded_file.type == "application/pdf":
+             if is_image_pdf(uploaded_file):
+                 logger.warning(f"Image-based PDF detected: {uploaded_file.name}")
+                 return ""  # skip processing
+             uploaded_file.seek(0)  # rewind the stream after the image-PDF check
+             return extract_text_from_pdf(uploaded_file)
+         elif hasattr(uploaded_file, 'type') and uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
+             return extract_text_from_docx(uploaded_file)
+         return ""
+     except Exception as e:
+         logger.error(f"Error extracting text: {e}")
+         return ""
+
+ def is_image_pdf(uploaded_file):
+     """Check whether a PDF is image-based (contains no selectable text)."""
+     try:
+         reader = PyPDF2.PdfReader(uploaded_file)
+         for page in reader.pages:
+             if page.extract_text():
+                 return False  # text is present, so not an image PDF
+         return True  # no text detected, likely an image-based PDF
+     except Exception as e:
+         logger.error(f"Error checking if PDF is image-based: {e}")
+         return True  # assume an image PDF if an error occurs
+
+ def extract_text_from_pdf(uploaded_file):
+     try:
+         reader = PyPDF2.PdfReader(uploaded_file)
+         return "\n".join([page.extract_text() or "" for page in reader.pages])
+     except Exception as e:
+         logger.error(f"Error reading PDF {uploaded_file.name}: {e}")
+         return ""
+
+ def extract_text_from_docx(uploaded_file):
+     try:
+         doc = docx.Document(uploaded_file)
+         return "\n".join([para.text for para in doc.paragraphs])
+     except Exception as e:
+         logger.error(f"Error reading DOCX: {e}")
+         return ""
+
+ def extract_text_from_webpage(url):
+     try:
+         response = requests.get(url)
+         response.raise_for_status()
+         soup = BeautifulSoup(response.content, 'html.parser')
+         return soup.get_text(separator='\n')
+     except requests.exceptions.RequestException as e:
+         logger.error(f"Error fetching webpage: {e}")
+         return ""
+
+ def preprocess_text(text):
+     return text.lower()
+
+ def get_embeddings(text, model):
+     return model.encode(text)
+
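+ # Example round trip with the helpers above (a sketch; reuses the model and
+ # collection names from the Streamlit app, and is not part of the original code):
+ #   model = SentenceTransformer("all-mpnet-base-v2")
+ #   collection = PersistentClient(path="./cv_db").get_or_create_collection("cv_embeddings")
+ #   text = preprocess_text("Senior Python developer with RAG experience.")
+ #   collection.add(embeddings=[get_embeddings(text, model)], documents=[text],
+ #                  ids=["example_cv.pdf"], metadatas=[{"filename": "example_cv.pdf"}])
+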
+ def get_similar_cvs(cvs, job_description_embedding, collection):
+     results = collection.query(
+         query_embeddings=[job_description_embedding],
+         n_results=len(cvs),
+         include=["distances", "metadatas"]
+     )
+
+     similar_cvs = []
+     for i in range(len(results['metadatas'][0])):
+         metadata = results['metadatas'][0][i]
+         if metadata:  # check that metadata exists
+             filename = metadata.get('filename')  # .get handles missing keys
+             if filename:
+                 similarity_score = 1 - results['distances'][0][i]
+                 similar_cvs.append({
+                     "filename": filename,
+                     "initial_score": similarity_score
+                 })
+             else:
+                 logger.warning(f"Metadata for CV at index {i} is missing 'filename'.")
+         else:
+             logger.warning(f"No metadata found for CV at index {i}.")
+
+     similar_cvs.sort(key=lambda x: x['initial_score'], reverse=True)
+     return similar_cvs
+
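+ # Usage sketch for get_similar_cvs (illustrative, not part of the original
+ # upload): rank the stored CVs against a job-description embedding.
+ #   ranked = get_similar_cvs(cvs, get_embeddings(job_text, model), collection)
+ #   # ranked[0] -> {"filename": "...", "initial_score": <cosine similarity>}
+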
+ # --- CV Assessment & Ranking ---
+
+ class RequirementAssessment(BaseModel):
+     requirement: str
+     match: str = Field(pattern="^(Yes|No|Partial|Not Applicable)$")
+     evidence: str
+     justification: str
+
+ class CandidateAssessment(BaseModel):
+     filename: str
+     requirements: List[RequirementAssessment]
+     overall_assessment: str = Field(pattern="^(Excellent|Good|Fair|Poor)$")
+     recommendation: str = Field(pattern="^(Interview|Reject|Maybe)$")
+     justification: str
+
+ def assess_cv(cv_text, requirements, filename, groq_client, max_retries=3, retry_delay=2):
+     """Assess a CV against specific job requirements with Tree-of-Thoughts prompting."""
+
+     requirements_str = "\n".join([f"- {req}" for req in requirements])
+     prompt_template = ChatPromptTemplate.from_template(
+         template="Analyze the following CV against the job requirements below using the Tree-of-Thoughts approach.\n\n"
+                  "Imagine three different experts: a Technical Lead, an HR Specialist, and a Project Manager.\n"
+                  "Each expert will analyze the CV from their perspective, provide their assessment, and assign a score out of 100.\n"
+                  "They will then collaborate to discuss their findings and reach a consensus on the final score and recommendation.\n\n"
+                  "Provide the response in the following EXACT format. Do not deviate from this format:\n\n"
+                  "Technical Lead Assessment:\n[Technical Lead's assessment]\nTechnical Lead Score: [Score out of 100]\n\n"
+                  "HR Specialist Assessment:\n[HR Specialist's assessment]\nHR Specialist Score: [Score out of 100]\n\n"
+                  "Project Manager Assessment:\n[Project Manager's assessment]\nProject Manager Score: [Score out of 100]\n\n"
+                  "Final Assessment:\n[Collaborative assessment]\nFinal Assessment Score: [Score out of 100]\n\n"
+                  "Recommendation: [Interview/Reject/Maybe]\n\n"
+                  "Job Requirements:\n{requirements}\n\n"
+                  "CV:\n{cv_text}\n\n"
+                  "Assessment:"
+     )
+
+     prompt = prompt_template.format_messages(requirements=requirements_str, cv_text=cv_text)
+
+     for attempt in range(max_retries):
+         try:
+             response = groq_client.invoke(prompt, timeout=30)
+             response_content = response.content
+
+             return {"filename": filename, "raw_response": response_content}
+
+         except requests.exceptions.RequestException as e:
+             logger.error(f"Network error during Groq API call: {e}")
+             if attempt == max_retries - 1:
+                 return {"filename": filename, "error": "Network error during LLM call"}
+             else:
+                 logger.warning(f"Network error, retrying in {retry_delay} seconds (attempt {attempt+1}/{max_retries}).")
+                 time.sleep(retry_delay)
+                 retry_delay *= 2  # exponential backoff
+
+         except Exception as e:
+             logger.error(f"Groq API error (attempt {attempt + 1}/{max_retries}): {e}")
+             if attempt == max_retries - 1:
+                 return {"filename": filename, "error": "General LLM failure"}
+             else:
+                 logger.warning(f"General LLM error, retrying in {retry_delay} seconds (attempt {attempt+1}/{max_retries}).")
+                 time.sleep(retry_delay)
+                 retry_delay *= 2
+
+     return {"filename": filename, "error": "LLM call failed after multiple retries."}
+
+ def extract_job_requirements(job_description, groq_client):
+     """Extract job requirements from the job description using the LLM."""
+     prompt_template = ChatPromptTemplate.from_template(
+         template="Extract the key job requirements from the following job description:\n\n{job_description}\n\nRequirements:"
+     )
+     prompt = prompt_template.format_messages(job_description=job_description)
+
+     try:
+         response = groq_client.invoke(prompt, timeout=30)
+         requirements_text = response.content.strip()
+         requirements = [req.strip() for req in requirements_text.split('\n') if req.strip()]
+         return requirements
+     except Exception as e:
+         logger.error(f"Error extracting job requirements: {e}")
+         return []
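+
+ if __name__ == "__main__":
+     # Minimal smoke test for the LLM helpers (a sketch, not part of the
+     # original upload; assumes GROQ_API_KEY is set and mirrors how the app
+     # constructs its client).
+     client = ChatGroq(api_key=os.environ.get("GROQ_API_KEY"))
+     jd = "We need a Python developer with RAG and Streamlit experience."
+     reqs = extract_job_requirements(jd, client)
+     print("Requirements:", reqs)
+     cv = "python developer, 4 years. built rag pipelines with chromadb and streamlit."
+     print(assess_cv(cv, reqs, "example_cv.txt", client))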