Imarticuslearning committed on
Commit
c78665e
·
verified ·
1 Parent(s): 4859243

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -6
app.py CHANGED
@@ -318,15 +318,16 @@ def evaluate_answers():
318
  [f"Question {i+1}: {entry['question']}\nCandidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
319
  for i, entry in enumerate(st.session_state["answers"])]
320
  )
 
321
  full_prompt_for_hr_evaluation = f"{hr_prompt_template}\n\nCandidate's Interview Answers (Consider all of these for holistic parameter scoring):\n{candidate_responses_formatted_hr}"
322
 
323
  try:
324
  response_content = model.generate_content(full_prompt_for_hr_evaluation)
325
  full_llm_response_text = response_content.text.strip()
326
-
327
- print("--- LLM Output for HR Score Extraction ---")
328
  print(full_llm_response_text)
329
- print("-----------------------------------------")
 
330
 
331
  hr_parameter_scores_parsed_dict = {} # To store parsed scores for each HR param
332
  total_weighted_score_percentage = 0.0
@@ -357,8 +358,14 @@ def evaluate_answers():
357
  total_weighted_score_percentage += (param_score / 5.0) * config_data["weight_normalized"] # Use normalized weight
358
 
359
  st.session_state["hr_parameter_scores_dict"] = hr_parameter_scores_parsed_dict # Store for table display
360
- st.session_state["overall_score"] = round(total_weighted_score_percentage, 1)
361
- st.session_state["percentage_score"] = round(total_weighted_score_percentage, 1)
 
 
 
 
 
 
362
 
363
  # Construct the feedback to be displayed: Parsed scores + Qualitative from LLM
364
  # The full_llm_response_text might still be useful if qualitative parsing is tricky
@@ -1002,10 +1009,45 @@ if st.session_state.get("show_summary", False):
1002
  if num_qs_in_session == 0 and st.session_state.get("answers"): # Fallback if no generated_questions but answers exist
1003
  num_qs_in_session = len(st.session_state.answers)
1004
 
1005
- max_score_possible_for_session = num_qs_in_session * 5.0
 
 
 
 
 
 
 
 
 
 
 
1006
  current_percentage_score = st.session_state.get('percentage_score', 0.0)
1007
  current_overall_score = st.session_state.get('overall_score', 0.0)
1008
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1009
  # Display the calculated score and percentage bar first in a card
1010
  st.markdown(f"""
1011
  <div class='summary-card' style="margin-bottom: 20px;">
 
318
  [f"Question {i+1}: {entry['question']}\nCandidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
319
  for i, entry in enumerate(st.session_state["answers"])]
320
  )
321
+ #full_prompt_for_hr_evaluation = f"{hr_prompt_template}\n\nCandidate's Interview Answers:\n{candidate_responses_formatted_hr}"
322
  full_prompt_for_hr_evaluation = f"{hr_prompt_template}\n\nCandidate's Interview Answers (Consider all of these for holistic parameter scoring):\n{candidate_responses_formatted_hr}"
323
 
324
  try:
325
  response_content = model.generate_content(full_prompt_for_hr_evaluation)
326
  full_llm_response_text = response_content.text.strip()
327
+ print("--- FULL LLM SOFT SKILLS RESPONSE ---")
 
328
  print(full_llm_response_text)
329
+ print("------ END RESPONSE ------")
330
+ print("--- AI Full Response for Soft Skills ---\n", full_llm_response_text, "\n------------------------")
331
 
332
  hr_parameter_scores_parsed_dict = {} # To store parsed scores for each HR param
333
  total_weighted_score_percentage = 0.0
 
358
  total_weighted_score_percentage += (param_score / 5.0) * config_data["weight_normalized"] # Use normalized weight
359
 
360
  st.session_state["hr_parameter_scores_dict"] = hr_parameter_scores_parsed_dict # Store for table display
361
+
362
+ num_qs_in_session = len(st.session_state.get("answers", []))
363
+ max_possible_score = num_qs_in_session * 5.0 # Each Q worth 5
364
+ actual_score = (total_weighted_score_percentage / 100.0) * max_possible_score
365
+
366
+ st.session_state["overall_score"] = round(actual_score, 1)
367
+ st.session_state["percentage_score"] = round((actual_score / max_possible_score) * 100, 1)
368
+
369
 
370
  # Construct the feedback to be displayed: Parsed scores + Qualitative from LLM
371
  # The full_llm_response_text might still be useful if qualitative parsing is tricky
 
1009
  if num_qs_in_session == 0 and st.session_state.get("answers"): # Fallback if no generated_questions but answers exist
1010
  num_qs_in_session = len(st.session_state.answers)
1011
 
1012
+ if st.session_state["selected_domain"] == "Soft Skills":
1013
+ num_qs_in_session = len(st.session_state.get("answers", []))
1014
+ max_score_possible_for_session = num_qs_in_session * 5.0
1015
+
1016
+ else:
1017
+ if st.session_state["selected_domain"] == "Soft Skills":
1018
+ num_hr_params = len(st.session_state.get("hr_parameter_scores_dict", {}))
1019
+ max_score_possible_for_session = num_hr_params * 5.0
1020
+ else:
1021
+ max_score_possible_for_session = num_qs_in_session * 5.0
1022
+
1023
+ #max_score_possible_for_session = num_qs_in_session * 5.0
1024
  current_percentage_score = st.session_state.get('percentage_score', 0.0)
1025
  current_overall_score = st.session_state.get('overall_score', 0.0)
1026
 
1027
+ if st.session_state["selected_domain"] == "Soft Skills":
1028
+ hr_table_data = []
1029
+ for param, config in HR_PARAMETERS_CONFIG.items():
1030
+ score = st.session_state.get("hr_parameter_scores_dict", {}).get(param, 0.0)
1031
+ weight_percent = config["weight_original"]
1032
+ contribution = (score / 5.0) * config["weight_normalized"]
1033
+ hr_table_data.append({
1034
+ "Parameter": param,
1035
+ "Weight (Original %)": f"{weight_percent}%",
1036
+ "Score (1–5)": round(score, 1),
1037
+ "Contribution to Final %": f"{contribution:.1f}%"
1038
+ })
1039
+
1040
+ hr_table_data.append({
1041
+ "Parameter": "Total",
1042
+ "Weight (Original %)": "100%",
1043
+ "Score (1–5)": "",
1044
+ "Contribution to Final %": f"{current_percentage_score:.1f}%"
1045
+ })
1046
+
1047
+ hr_df = pd.DataFrame(hr_table_data)
1048
+ st.markdown("### 🧾 Soft Skills Evaluation Breakdown")
1049
+ st.dataframe(hr_df, use_container_width=True)
1050
+
1051
  # Display the calculated score and percentage bar first in a card
1052
  st.markdown(f"""
1053
  <div class='summary-card' style="margin-bottom: 20px;">