meesamraza commited on
Commit
d915eee
·
verified ·
1 Parent(s): 5115d95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -75
app.py CHANGED
@@ -1,21 +1,29 @@
 
 
1
  import streamlit as st
2
  import pandas as pd
3
  import io
4
  import os
5
- import fitz # PyMuPDF
6
  import docx2txt
7
  from groq import Groq
8
  from dotenv import load_dotenv
9
  from pydantic import BaseModel, Field
10
 
11
- # Test message to verify app is running
12
- st.title("πŸ› οΈ Resume Analyzer App")
13
- st.write("If you can see this, the app is running!")
 
14
 
15
- load_dotenv('.env') # Look for .env in the same directory as the script
 
16
 
 
17
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
18
 
 
 
 
19
  # Initialize Groq Client
20
  if GROQ_API_KEY:
21
  try:
@@ -24,18 +32,13 @@ if GROQ_API_KEY:
24
  st.error(f"Error initializing Groq Client: {e}")
25
  st.stop()
26
  else:
27
-
28
- st.error("GROQ_API_KEY not found. Please ensure the .env file is in the project root and contains your key.")
29
  st.stop()
30
 
31
- # Admin Password (as requested)
32
- ADMIN_PASSWORD = "admin"
33
-
34
  # Initialize Session State
35
  if 'is_admin_logged_in' not in st.session_state:
36
  st.session_state.is_admin_logged_in = False
37
  if 'analyzed_data' not in st.session_state:
38
-
39
  initial_cols = [
40
  'Name', 'Job Role', 'Resume Score (100)', 'Email', 'Phone', 'Shortlisted',
41
  'Experience Summary', 'Education Summary', 'Communication Rating (1-10)',
@@ -55,11 +58,14 @@ class ResumeAnalysis(BaseModel):
55
  certifications: list[str] = Field(description="List of professional certifications (e.g., PMP, AWS Certified).")
56
  experience_summary: str = Field(description="A concise summary of the candidate's professional experience.")
57
  education_summary: str = Field(description="A concise summary of the candidate's highest education.")
58
- communication_skills: str = Field(description="A rating (1-10) or brief description of communication skills based on the resume language.")
 
 
59
  technical_skills: list[str] = Field(description="List of technical skills/technologies mentioned (e.g., Python, SQL, Docker).")
60
- aba_therapy_skills: str = Field(description="Specific mention or score (1-10) for ABA Therapy skills, ONLY if the role is 'Therapist'.")
61
- rbt_bcba_certification: str = Field(description="Indicate 'Yes' or 'No' if RBT/BCBA certification is mentioned, ONLY if the role is 'Therapist'.")
62
- autism_care_experience_score: str = Field(description="A score (1-10) for Autism-Care Experience, ONLY if the role is 'Therapist'.")
 
63
 
64
  # --- 3. HELPER FUNCTIONS ---
65
 
@@ -68,19 +74,17 @@ def extract_text_from_file(uploaded_file):
68
  file_type = uploaded_file.type
69
  try:
70
  if file_type == "application/pdf":
71
- # Use PyMuPDF for PDF
72
  with fitz.open(stream=uploaded_file.read(), filetype="pdf") as doc:
73
  text = ""
74
  for page in doc:
75
  text += page.get_text()
76
  return text
77
  elif file_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
78
- # Use docx2txt for DOCX
79
  return docx2txt.process(uploaded_file)
80
  else:
81
  return ""
82
  except Exception as e:
83
- st.error(f"Error extracting text: {e}")
84
  return ""
85
 
86
  @st.cache_data(show_spinner="Analyzing resume with Groq...")
@@ -93,8 +97,14 @@ def analyze_resume_with_groq(resume_text: str, job_role: str) -> ResumeAnalysis:
93
  therapist_instructions = (
94
  "Because the job role is 'Therapist', you MUST carefully look for: "
95
  "1. ABA Therapy Skills, RBT/BCBA Certification, and Autism-Care Experience. "
96
- "2. Provide a score from 1-10 for the specialized fields: 'aba_therapy_skills' and 'autism_care_experience_score'. "
97
- "3. Set 'rbt_bcba_certification' to 'Yes' or 'No'."
 
 
 
 
 
 
98
  )
99
 
100
  # System Prompt for Groq
@@ -102,6 +112,7 @@ def analyze_resume_with_groq(resume_text: str, job_role: str) -> ResumeAnalysis:
102
  f"You are a professional Resume Analyzer. Your task is to extract specific information from the provided resume text. "
103
  f"The candidate is applying for the role of '{job_role}'. "
104
  f"Follow the instructions precisely and return a JSON object that strictly adheres to the provided Pydantic schema. "
 
105
  f"For skills, provide a list of 5-10 most relevant items. {therapist_instructions}"
106
  )
107
 
@@ -121,62 +132,55 @@ def analyze_resume_with_groq(resume_text: str, job_role: str) -> ResumeAnalysis:
121
  return analysis
122
 
123
  except Exception as e:
124
- st.error(f"Groq API Error: {e}")
 
 
125
  # Return an empty/default analysis object on failure
126
- return ResumeAnalysis(name="Extraction Failed", email="", phone="", certifications=[], experience_summary="", education_summary="", communication_skills="0", technical_skills=[], aba_therapy_skills="0", rbt_bcba_certification="No", autism_care_experience_score="0")
127
 
128
 
129
  def calculate_resume_score(analysis: ResumeAnalysis) -> float:
130
  """Calculates the weighted score out of 100."""
131
 
132
- # Weights for maximum possible score contribution:
133
- # Experience (40%), Skills (30%), Communication (20%), Certifications (10%)
134
-
135
  total_score = 0.0
136
 
137
  # 1. Experience Score (Max 40 points)
138
- # Simple heuristic: longer summary means more experience found.
139
- # Max score is 40.
140
- exp_factor = min(len(analysis.experience_summary) / 100.0, 1.0) # Use 100 chars as the max point
141
  total_score += exp_factor * 40.0
142
 
143
  # 2. Skills Score (Max 30 points)
144
- # Based on number of skills found (up to 10 relevant skills)
145
- # Max score is 30.
146
  skills_factor = min(len(analysis.technical_skills) / 10.0, 1.0)
147
  total_score += skills_factor * 30.0
148
 
149
  # 3. Communication Score (Max 20 points)
150
- # Assuming 'communication_skills' is a score string '1-10' from Groq
151
  try:
152
- # Tries to extract the first number from the string (e.g., '7-High' -> 7)
153
- comm_rating = float(analysis.communication_skills.split('-')[0].strip())
 
154
  except (ValueError, IndexError):
155
- comm_rating = 5.0 # Default if Groq returns unparsable text
156
 
157
- score_comm = (comm_rating / 10.0) * 20.0 # Scale 1-10 rating to max 20 points
158
  total_score += score_comm
159
 
160
  # 4. Certification Score (Max 10 points)
161
- # Each certification adds a point, max 10 certs.
162
  score_cert = min(len(analysis.certifications), 10) * 1.0
163
  total_score += score_cert
164
 
165
  # --- Therapist-Specific Bonus Checks ---
166
  if st.session_state.get('selected_role') == "Therapist":
167
- # Additional points based on specialized scores (e.g., up to 5 points bonus)
168
  try:
169
- aba_score = float(analysis.aba_therapy_skills.split('-')[0].strip())
170
- autism_score = float(analysis.autism_care_experience_score.split('-')[0].strip())
 
171
 
172
  # Add a bonus based on the average specialized scores (max 10 points)
173
  specialized_bonus = ((aba_score + autism_score) / 20.0) * 10.0
174
  total_score += specialized_bonus
175
  except (ValueError, IndexError):
176
- pass # Ignore if specialized scores are not numbers
177
 
178
 
179
- # Final cleanup and capping
180
  final_score = round(min(total_score, 100))
181
  return float(final_score)
182
 
@@ -184,19 +188,14 @@ def calculate_resume_score(analysis: ResumeAnalysis) -> float:
184
  def append_analysis_to_dataframe(job_role: str, analysis: ResumeAnalysis, score: float):
185
  """Formats and appends the new analysis to the session state DataFrame."""
186
 
187
- # Convert Pydantic model to dictionary
188
  data = analysis.dict()
189
-
190
- # Add computed and derived fields
191
  data['Job Role'] = job_role
192
  data['Resume Score'] = score
193
- data['Shortlisted'] = 'No' # Default status
194
 
195
- # Clean up list fields for display/Excel
196
  technical_skills_list = ", ".join(data['technical_skills'])
197
  certifications_list = ", ".join(data['certifications'])
198
 
199
- # The new row data
200
  df_data = {
201
  'Name': data['name'],
202
  'Job Role': job_role,
@@ -214,18 +213,14 @@ def append_analysis_to_dataframe(job_role: str, analysis: ResumeAnalysis, score:
214
  'Autism-Care Exp (1-10)': data['autism_care_experience_score'],
215
  }
216
 
217
- # Convert to a single-row DataFrame and concatenate
218
  new_df = pd.DataFrame([df_data])
219
  st.session_state.analyzed_data = pd.concat([st.session_state.analyzed_data, new_df], ignore_index=True)
220
 
221
 
222
  # --- 4. APP LAYOUT AND LOGIC ---
223
 
224
- st.set_page_config(layout="wide", page_title="Quantum Scrutiny Platform | Groq-Powered")
225
-
226
  st.title("🌌 Quantum Scrutiny Platform: AI Resume Analysis")
227
 
228
- # --- Tabs for User and Admin ---
229
  tab_user, tab_admin = st.tabs(["πŸ‘€ Resume Uploader (User Panel)", "πŸ”’ Admin Dashboard (Password Protected)"])
230
 
231
  # =========================================================================
@@ -235,15 +230,13 @@ with tab_user:
235
  st.header("Upload Resumes for Analysis")
236
  st.info("Upload multiple PDF or DOCX files. The Groq AI engine will quickly extract and score the key data.")
237
 
238
- # Job Role Selection
239
  job_role_options = ["Software Engineer", "ML Engineer", "Therapist", "Data Analyst", "Project Manager"]
240
  selected_role = st.selectbox(
241
  "**1. Select the Target Job Role** (Influences analysis and scoring)",
242
  options=job_role_options,
243
- key='selected_role' # Store role in session state for scoring logic
244
  )
245
 
246
- # File Uploader
247
  uploaded_files = st.file_uploader(
248
  "**2. Upload Resumes** (PDF or DOCX)",
249
  type=["pdf", "docx"],
@@ -257,7 +250,6 @@ with tab_user:
257
  total_files = len(uploaded_files)
258
  progress_bar = st.progress(0)
259
 
260
- # Clear previous individual file analysis displays
261
  st.session_state.individual_analysis = []
262
 
263
  with st.status("Processing Resumes...", expanded=True) as status_box:
@@ -266,27 +258,21 @@ with tab_user:
266
  file_name = file.name
267
  st.write(f"Analyzing **{file_name}**...")
268
 
269
- # 1. Extract Text
270
  resume_text = extract_text_from_file(file)
271
 
272
  if not resume_text:
273
  st.error(f"Could not extract text from {file_name}. Skipping.")
274
  continue
275
 
276
- # 2. Analyze with Groq
277
  analysis = analyze_resume_with_groq(resume_text, selected_role)
278
 
279
  if analysis.name == "Extraction Failed":
280
  st.error(f"Groq extraction failed for {file_name}. Skipping.")
281
  continue
282
 
283
- # 3. Calculate Score
284
  score = calculate_resume_score(analysis)
285
-
286
- # 4. Store Data
287
  append_analysis_to_dataframe(selected_role, analysis, score)
288
 
289
- # Store data for individual display below
290
  st.session_state.individual_analysis.append({
291
  'name': analysis.name,
292
  'score': score,
@@ -294,14 +280,12 @@ with tab_user:
294
  'file_name': file_name
295
  })
296
 
297
- # Update progress
298
  progress_bar.progress((i + 1) / total_files)
299
 
300
  status_box.update(label="Analysis Complete!", state="complete", expanded=False)
301
 
302
  st.success(f"**βœ… Successfully analyzed {total_files} resumes.**")
303
 
304
- # Display results of the last batch of analysis
305
  if 'individual_analysis' in st.session_state and st.session_state.individual_analysis:
306
  st.subheader("Last Analysis Summary")
307
  for item in st.session_state.individual_analysis:
@@ -315,7 +299,6 @@ with tab_user:
315
  # =========================================================================
316
  with tab_admin:
317
 
318
- # --- Login Logic ---
319
  if not st.session_state.is_admin_logged_in:
320
  st.header("Admin Login")
321
  password = st.text_input("Enter Admin Password", type="password")
@@ -325,9 +308,8 @@ with tab_admin:
325
  st.rerun()
326
  else:
327
  st.error("Incorrect password.")
328
- st.stop() # Stop execution until logged in
329
 
330
- # --- Dashboard Content (Logged In) ---
331
  st.header("🎯 Recruitment Dashboard")
332
  st.markdown("---")
333
 
@@ -340,14 +322,11 @@ with tab_admin:
340
  else:
341
  df = st.session_state.analyzed_data.copy()
342
 
343
- # --- 1. Shortlisting & Data Display ---
344
  st.subheader("Candidate Data Table")
345
  st.success(f"**Total Candidates Analyzed: {len(df)}**")
346
 
347
- # Key columns for display
348
  display_cols = ['Name', 'Job Role', 'Resume Score (100)', 'Shortlisted', 'Email', 'Skills/Technologies']
349
 
350
- # Editable Data Table (allowing admin to change 'Shortlisted' status)
351
  edited_df = st.data_editor(
352
  df[display_cols],
353
  column_config={
@@ -362,19 +341,13 @@ with tab_admin:
362
  hide_index=True
363
  )
364
 
365
- # Update the session state DataFrame with the edited shortlisting status
366
- # This keeps the changes persistent
367
  st.session_state.analyzed_data['Shortlisted'] = edited_df['Shortlisted']
368
 
369
  st.markdown("---")
370
 
371
- # --- 2. Download Excel File ---
372
  st.subheader("πŸ“₯ Download Data")
373
 
374
- # The full DataFrame to export
375
  df_export = st.session_state.analyzed_data.copy()
376
-
377
- # Create an in-memory Excel file buffer
378
  excel_buffer = io.BytesIO()
379
  with pd.ExcelWriter(excel_buffer, engine='openpyxl') as writer:
380
  df_export.to_excel(writer, index=False, sheet_name='Resume Analysis Data')
 
1
+ # src/streamlit_app.py
2
+
3
  import streamlit as st
4
  import pandas as pd
5
  import io
6
  import os
7
+ import fitz
8
  import docx2txt
9
  from groq import Groq
10
  from dotenv import load_dotenv
11
  from pydantic import BaseModel, Field
12
 
13
+ # --- 0. FIX: SET PAGE CONFIG AS THE FIRST STREAMLIT COMMAND ---
14
+ st.set_page_config(layout="wide", page_title="Quantum Scrutiny Platform | Groq-Powered")
15
+
16
+ # --- 1. CONFIGURATION AND INITIALIZATION ---
17
 
18
+ # FIX for .env on local machine: Load environment variables by explicitly pointing up one directory.
19
+ load_dotenv(os.path.join(os.path.dirname(__file__), '..', '.env'))
20
 
21
+ # FIX for Hugging Face Deployment: Read the key from the environment/Secrets.
22
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
23
 
24
+ # Admin Password (as requested)
25
+ ADMIN_PASSWORD = "admin"
26
+
27
  # Initialize Groq Client
28
  if GROQ_API_KEY:
29
  try:
 
32
  st.error(f"Error initializing Groq Client: {e}")
33
  st.stop()
34
  else:
35
+ st.error("GROQ_API_KEY not found. Please ensure the key is set as a Secret in Hugging Face or in the local .env file.")
 
36
  st.stop()
37
 
 
 
 
38
  # Initialize Session State
39
  if 'is_admin_logged_in' not in st.session_state:
40
  st.session_state.is_admin_logged_in = False
41
  if 'analyzed_data' not in st.session_state:
 
42
  initial_cols = [
43
  'Name', 'Job Role', 'Resume Score (100)', 'Email', 'Phone', 'Shortlisted',
44
  'Experience Summary', 'Education Summary', 'Communication Rating (1-10)',
 
58
  certifications: list[str] = Field(description="List of professional certifications (e.g., PMP, AWS Certified).")
59
  experience_summary: str = Field(description="A concise summary of the candidate's professional experience.")
60
  education_summary: str = Field(description="A concise summary of the candidate's highest education.")
61
+
62
+ # --- FIX 1: Explicitly describe required STRING output format ---
63
+ communication_skills: str = Field(description="A score as a STRING (e.g., '8') or brief description of communication skills.")
64
  technical_skills: list[str] = Field(description="List of technical skills/technologies mentioned (e.g., Python, SQL, Docker).")
65
+ aba_therapy_skills: str = Field(description="Specific mention or score as a STRING (e.g., '7') for ABA Therapy skills, ONLY if the role is 'Therapist'. Use the STRING 'N/A' if not applicable or found.")
66
+ rbt_bcba_certification: str = Field(description="Indicate the STRING 'Yes' or 'No' if RBT/BCBA certification is mentioned, ONLY if the role is 'Therapist'. Use the STRING 'N/A' if not applicable or found.")
67
+ autism_care_experience_score: str = Field(description="A score as a STRING (e.g., '9') for Autism-Care Experience, ONLY if the role is 'Therapist'. Use the STRING 'N/A' if not applicable or found.")
68
+
69
 
70
  # --- 3. HELPER FUNCTIONS ---
71
 
 
74
  file_type = uploaded_file.type
75
  try:
76
  if file_type == "application/pdf":
 
77
  with fitz.open(stream=uploaded_file.read(), filetype="pdf") as doc:
78
  text = ""
79
  for page in doc:
80
  text += page.get_text()
81
  return text
82
  elif file_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
 
83
  return docx2txt.process(uploaded_file)
84
  else:
85
  return ""
86
  except Exception as e:
87
+ print(f"Error extracting text: {e}")
88
  return ""
89
 
90
  @st.cache_data(show_spinner="Analyzing resume with Groq...")
 
97
  therapist_instructions = (
98
  "Because the job role is 'Therapist', you MUST carefully look for: "
99
  "1. ABA Therapy Skills, RBT/BCBA Certification, and Autism-Care Experience. "
100
+ "2. Provide a score from 1-10 as a **STRING** (e.g., '7') for the specialized fields: 'aba_therapy_skills' and 'autism_care_experience_score'. "
101
+ "3. If any specialized therapist field is not found, you MUST use the **STRING** 'N/A'. "
102
+ "4. Set 'rbt_bcba_certification' to the **STRING** 'Yes' or 'No'."
103
+ )
104
+ else:
105
+ # For non-therapist roles, explicitly instruct the model to use 'N/A' for therapist fields
106
+ therapist_instructions = (
107
+ "Since the role is not 'Therapist', set 'aba_therapy_skills', 'autism_care_experience_score', and 'rbt_bcba_certification' to the **STRING** 'N/A'."
108
  )
109
 
110
  # System Prompt for Groq
 
112
  f"You are a professional Resume Analyzer. Your task is to extract specific information from the provided resume text. "
113
  f"The candidate is applying for the role of '{job_role}'. "
114
  f"Follow the instructions precisely and return a JSON object that strictly adheres to the provided Pydantic schema. "
115
+ f"**IMPORTANT:** All values must be returned as the data type specified. Numerical scores must be enclosed in quotes to be treated as **STRING** types (e.g., \"8\"). "
116
  f"For skills, provide a list of 5-10 most relevant items. {therapist_instructions}"
117
  )
118
 
 
132
  return analysis
133
 
134
  except Exception as e:
135
+ # This will now only catch errors related to the API connection or Pydantic structural errors
136
+ # (e.g., list vs string), not the common type mismatches.
137
+ st.error(f"Groq API Error: {e}")
138
  # Return an empty/default analysis object on failure
139
+ return ResumeAnalysis(name="Extraction Failed", email="", phone="", certifications=[], experience_summary="", education_summary="", communication_skills="N/A", technical_skills=[], aba_therapy_skills="N/A", rbt_bcba_certification="N/A", autism_care_experience_score="N/A")
140
 
141
 
142
def calculate_resume_score(analysis: ResumeAnalysis) -> float:
    """Compute a weighted resume score on a 0-100 scale.

    Weighting: experience (max 40 pts), technical skills (max 30 pts),
    communication (max 20 pts), certifications (max 10 pts), plus a
    therapist-only specialization bonus (up to 10 pts). The total is
    capped at 100 and rounded to a whole number before being returned
    as a float.
    """

    def _rating_from(text: str) -> float:
        # Pull the leading number out of strings like '8' or '8-High'.
        return float(text.split('-')[0].strip())

    score = 0.0

    # Experience: scale summary length against a 100-character ceiling (max 40 pts).
    score += min(len(analysis.experience_summary) / 100.0, 1.0) * 40.0

    # Skills: up to 10 listed skills count toward the maximum 30 pts.
    score += min(len(analysis.technical_skills) / 10.0, 1.0) * 30.0

    # Communication: parse the 1-10 rating string; fall back to a neutral 5.
    try:
        communication = _rating_from(analysis.communication_skills)
    except (ValueError, IndexError):
        communication = 5.0
    score += (communication / 10.0) * 20.0

    # Certifications: one point each, capped at 10 pts.
    score += min(len(analysis.certifications), 10) * 1.0

    # Therapist roles earn a bonus derived from the two specialized ratings.
    if st.session_state.get('selected_role') == "Therapist":
        try:
            aba = 0.0 if analysis.aba_therapy_skills == 'N/A' else _rating_from(analysis.aba_therapy_skills)
            autism = 0.0 if analysis.autism_care_experience_score == 'N/A' else _rating_from(analysis.autism_care_experience_score)
            # Average of the two 1-10 ratings, rescaled to a 10-point bonus.
            score += ((aba + autism) / 20.0) * 10.0
        except (ValueError, IndexError):
            # Unparsable specialized ratings simply contribute no bonus.
            pass

    return float(round(min(score, 100)))
186
 
 
188
  def append_analysis_to_dataframe(job_role: str, analysis: ResumeAnalysis, score: float):
189
  """Formats and appends the new analysis to the session state DataFrame."""
190
 
 
191
  data = analysis.dict()
 
 
192
  data['Job Role'] = job_role
193
  data['Resume Score'] = score
194
+ data['Shortlisted'] = 'No'
195
 
 
196
  technical_skills_list = ", ".join(data['technical_skills'])
197
  certifications_list = ", ".join(data['certifications'])
198
 
 
199
  df_data = {
200
  'Name': data['name'],
201
  'Job Role': job_role,
 
213
  'Autism-Care Exp (1-10)': data['autism_care_experience_score'],
214
  }
215
 
 
216
  new_df = pd.DataFrame([df_data])
217
  st.session_state.analyzed_data = pd.concat([st.session_state.analyzed_data, new_df], ignore_index=True)
218
 
219
 
220
  # --- 4. APP LAYOUT AND LOGIC ---
221
 
 
 
222
  st.title("🌌 Quantum Scrutiny Platform: AI Resume Analysis")
223
 
 
224
  tab_user, tab_admin = st.tabs(["πŸ‘€ Resume Uploader (User Panel)", "πŸ”’ Admin Dashboard (Password Protected)"])
225
 
226
  # =========================================================================
 
230
  st.header("Upload Resumes for Analysis")
231
  st.info("Upload multiple PDF or DOCX files. The Groq AI engine will quickly extract and score the key data.")
232
 
 
233
  job_role_options = ["Software Engineer", "ML Engineer", "Therapist", "Data Analyst", "Project Manager"]
234
  selected_role = st.selectbox(
235
  "**1. Select the Target Job Role** (Influences analysis and scoring)",
236
  options=job_role_options,
237
+ key='selected_role'
238
  )
239
 
 
240
  uploaded_files = st.file_uploader(
241
  "**2. Upload Resumes** (PDF or DOCX)",
242
  type=["pdf", "docx"],
 
250
  total_files = len(uploaded_files)
251
  progress_bar = st.progress(0)
252
 
 
253
  st.session_state.individual_analysis = []
254
 
255
  with st.status("Processing Resumes...", expanded=True) as status_box:
 
258
  file_name = file.name
259
  st.write(f"Analyzing **{file_name}**...")
260
 
 
261
  resume_text = extract_text_from_file(file)
262
 
263
  if not resume_text:
264
  st.error(f"Could not extract text from {file_name}. Skipping.")
265
  continue
266
 
 
267
  analysis = analyze_resume_with_groq(resume_text, selected_role)
268
 
269
  if analysis.name == "Extraction Failed":
270
  st.error(f"Groq extraction failed for {file_name}. Skipping.")
271
  continue
272
 
 
273
  score = calculate_resume_score(analysis)
 
 
274
  append_analysis_to_dataframe(selected_role, analysis, score)
275
 
 
276
  st.session_state.individual_analysis.append({
277
  'name': analysis.name,
278
  'score': score,
 
280
  'file_name': file_name
281
  })
282
 
 
283
  progress_bar.progress((i + 1) / total_files)
284
 
285
  status_box.update(label="Analysis Complete!", state="complete", expanded=False)
286
 
287
  st.success(f"**βœ… Successfully analyzed {total_files} resumes.**")
288
 
 
289
  if 'individual_analysis' in st.session_state and st.session_state.individual_analysis:
290
  st.subheader("Last Analysis Summary")
291
  for item in st.session_state.individual_analysis:
 
299
  # =========================================================================
300
  with tab_admin:
301
 
 
302
  if not st.session_state.is_admin_logged_in:
303
  st.header("Admin Login")
304
  password = st.text_input("Enter Admin Password", type="password")
 
308
  st.rerun()
309
  else:
310
  st.error("Incorrect password.")
311
+ st.stop()
312
 
 
313
  st.header("🎯 Recruitment Dashboard")
314
  st.markdown("---")
315
 
 
322
  else:
323
  df = st.session_state.analyzed_data.copy()
324
 
 
325
  st.subheader("Candidate Data Table")
326
  st.success(f"**Total Candidates Analyzed: {len(df)}**")
327
 
 
328
  display_cols = ['Name', 'Job Role', 'Resume Score (100)', 'Shortlisted', 'Email', 'Skills/Technologies']
329
 
 
330
  edited_df = st.data_editor(
331
  df[display_cols],
332
  column_config={
 
341
  hide_index=True
342
  )
343
 
 
 
344
  st.session_state.analyzed_data['Shortlisted'] = edited_df['Shortlisted']
345
 
346
  st.markdown("---")
347
 
 
348
  st.subheader("πŸ“₯ Download Data")
349
 
 
350
  df_export = st.session_state.analyzed_data.copy()
 
 
351
  excel_buffer = io.BytesIO()
352
  with pd.ExcelWriter(excel_buffer, engine='openpyxl') as writer:
353
  df_export.to_excel(writer, index=False, sheet_name='Resume Analysis Data')