Imarticuslearning committed on
Commit
c60bc97
·
verified ·
1 Parent(s): c99ef71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -48
app.py CHANGED
@@ -390,44 +390,38 @@ def evaluate_answers():
390
  else: # --- NON-HR (Analytics, Finance) Evaluation Logic ---
391
  base_assessment_criteria_qualitative_non_hr = """
392
  For the OVERALL qualitative summary, assess responses based on:
393
- - Conceptual Understanding (effort and relevance more than perfect accuracy for the level)
394
- - Communication Clarity (can the core idea be understood?)
395
- - Depth of Explanation (relative to expected level)
396
- - Use of Examples (if any, and if appropriate for the level)
397
- - Logical Flow (is there a basic structure or train of thought?)
398
  """
399
  per_question_scoring_guidelines_non_hr = f"""
400
- For EACH question and its answer, provide a score from 0 to 5 points.
401
- The candidate is at a {level_string} level.
402
- Scoring should reflect your role as a balanced evaluator — neither too lenient nor too harsh. Use wisdom and professional judgment.
403
-
404
- Consider the following when assigning the score:
405
- - Effort: Was there a genuine attempt to answer meaningfully?
406
- - Relevance: Is the response at least partially on-topic?
407
- - Clarity: Is the main idea understandable?
408
- - Logic: Does the response have a clear train of thought?
409
- - Examples: Were examples used when appropriate?
410
-
411
- Scoring Rubric:
412
- - **5 points:** Clear, relevant, and reasonably accurate with good structure. Shows understanding and effort.
413
- - **4 points:** Mostly correct with minor gaps or unclear phrasing. Shows some grasp and engagement.
414
- - **3 points:** Partially correct or somewhat vague, but a fair attempt is made.
415
- - **1–2 points:** Minimal relevance or weak structure, but not entirely blank or off-topic.
416
- - **0 points:** Candidate says “I don’t know,” “not sure,” or gives unrelated/placeholder text.
417
  """
418
-
419
  if level_string == "beginner":
420
  level_specific_instructions_non_hr = """
421
- You are a **supportive but realistic evaluator** for a **BEGINNER/FRESHER**. Encourage growth and confidence, but give honest scores based on visible effort, relevance, and basic understanding.
 
 
 
 
 
 
 
422
  """
423
  elif level_string == "intermediate":
424
- level_specific_instructions_non_hr = """
425
- You are a **professional and balanced evaluator** for an **INTERMEDIATE** candidate. Score honestly based on clarity, partial correctness, and application effort.
426
- """
427
- else: # advanced
428
- level_specific_instructions_non_hr = """
429
- You are a **discerning but fair evaluator** for an **ADVANCED** candidate. Expect depth and accuracy, but score reasonably when structure and effort are evident.
430
- """
431
  evaluation_prompt_template_non_hr = f"""
432
  {level_specific_instructions_non_hr}
433
  {per_question_scoring_guidelines_non_hr}
@@ -711,24 +705,53 @@ st.sidebar.markdown(f"**Selected Domain:** {st.session_state['selected_domain']}
711
  num_qs = st.sidebar.slider("Number of Questions:", 1, 10, 3)
712
 
713
  if st.session_state["selected_domain"] == "Soft Skills":
 
 
 
 
 
 
 
 
 
 
 
714
  if st.sidebar.button("Generate Questions"):
715
- st.session_state["generated_questions"] = sample(hr_questions, num_qs)
 
 
 
 
 
 
 
 
 
 
 
716
  st.session_state["current_question_index"] = 0
717
  st.rerun()
 
718
  else:
719
- section_choice = st.sidebar.radio("Choose Input Type:", ("Resume", "Job Description", "Skills"))
720
- difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
 
 
 
721
  input_text = ""
722
 
723
  if section_choice == "Resume":
 
724
  uploaded_file = st.sidebar.file_uploader("Upload Resume:", type=["pdf", "txt"])
725
  if uploaded_file:
726
  input_text = extract_pdf_text(uploaded_file)
727
 
728
  elif section_choice == "Job Description":
 
729
  input_text = st.sidebar.text_area("Paste Job Description:")
730
 
731
  elif section_choice == "Skills":
 
732
  input_text = ""
733
 
734
  if st.session_state["selected_domain"] == "Finance":
@@ -737,21 +760,27 @@ else:
737
 
738
  difficulty = st.session_state.get("difficulty", "Beginner")
739
 
740
- if selected_level != "Level-1":
 
 
 
 
 
 
741
  st.sidebar.warning(f"🚧 {selected_level} content is still under development. Please select Level-1 to continue.")
742
  st.stop()
743
 
744
  # Map difficulty level to column in Excel
745
  column_map = {
746
- "Beginner": "MODULE 1-EASY",
747
- "Intermediate": "MODULE 1-MEDIUM",
748
- "Advanced": "MODULE 1-DIFFICULT"
749
  }
750
 
751
  selected_column = column_map[difficulty]
752
 
753
  # Load Excel and questions
754
- excel_path = os.path.join("data", "CIBOP Mock Questions.xlsx")
755
  try:
756
  df = pd.read_excel(excel_path, engine="openpyxl")
757
  questions_from_excel = df[selected_column].dropna().astype(str).tolist()
@@ -762,16 +791,57 @@ else:
762
 
763
  st.sidebar.success(f"✅ Loaded {difficulty}-level questions from {selected_level}")
764
 
765
- else:
766
- # For Analytics or any other domain
767
- skills = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
768
  "Analytics": ["Python", "SQL", "Machine Learning", "Statistics", "Business Analytics"]
769
  }
770
- skill_list = skills.get(st.session_state["selected_domain"], [])
771
- if skill_list:
772
- selected_skill = st.sidebar.selectbox("Select a Skill:", skill_list, key="skill_select")
773
- input_text = selected_skill
774
- st.sidebar.markdown(f"✅ Selected Skill: **{selected_skill}**")
775
 
776
 
777
  if st.sidebar.button("Generate Questions"):
@@ -779,7 +849,7 @@ else:
779
  st.warning("⚠️ Please provide input based on the selected method.")
780
  st.stop()
781
 
782
- if st.session_state["selected_domain"] == "Finance" and section_choice == "Skills":
783
  st.session_state["generated_questions"] = sample(questions_from_excel, min(num_qs, len(questions_from_excel)))
784
  else:
785
  prompt = f"Ask {num_qs} direct and core-level {difficulty} interview questions related to {input_text}. Do not include intros or numbering."
 
390
  else: # --- NON-HR (Analytics, Finance) Evaluation Logic ---
391
  base_assessment_criteria_qualitative_non_hr = """
392
  For the OVERALL qualitative summary, assess responses based on:
393
+ - Conceptual Understanding (effort and relevance more than perfect accuracy for the level)
394
+ - Communication Clarity (can the core idea be understood?)
395
+ - Depth of Explanation (relative to expected level)
396
+ - Use of Examples (if any, and if appropriate for the level)
397
+ - Logical Flow (is there a basic structure or train of thought?)
398
  """
399
  per_question_scoring_guidelines_non_hr = f"""
400
+ For EACH question and its answer, provide a score from 0 to 5 points.
401
+ The candidate is at a {level_string} level.
402
+ Consider the following when assigning the per-question score:
403
+ - Effort: Did the candidate attempt a meaningful answer, even if partially incorrect?
404
+ - Relevance: Is the response at least partially related to the question topic?
405
+ - Clarity of thought for the candidate's level.
406
+ - Basic logical structure.
407
+ - Use of examples, if any were given and appropriate.
 
 
 
 
 
 
 
 
 
408
  """
 
409
  if level_string == "beginner":
410
  level_specific_instructions_non_hr = """
411
+ You are an **extremely understanding, encouraging, and supportive** interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to **build confidence**.
412
+ **Scoring Guidelines for Beginners (0-5 points per question):**
413
+ - **5 points:** Accurate, clear, and well-structured answer. Shows clear effort and basic understanding.
414
+ - **4 points:** Mostly correct with minor gaps or unclear phrasing.Good attempt, relevant, shows some understanding or key terms (e.g., one/two relevant words).
415
+ - **3 points:** Partially correct with evident effort, but lacks clarity or completeness.
416
+ - **1-2 points:** Minimal effort, mostly irrelevant, but an attempt beyond silence.
417
+ - **0 points:** Candidate explicitly says "I don’t know", "I'm not sure", or provides placeholder/non-answers. No relevant effort or understanding shown.Incorrect or unrelated.
418
+ Provide VERY positive feedback.
419
  """
420
  elif level_string == "intermediate":
421
+ level_specific_instructions_non_hr = """Supportive evaluator for **INTERMEDIATE**. Scoring (0-5): 5=Correct/Clear; 3-4=Mostly correct; 1-2=Partial/Gaps; 0=Incorrect."""
422
+ else: # Advanced
423
+ level_specific_instructions_non_hr = """Discerning evaluator for **ADVANCED**. Scoring (0-5): 5=Accurate/Comprehensive; 3-4=Correct lacks nuance; 1-2=Inaccurate; 0=Fundamentally incorrect."""
424
+
 
 
 
425
  evaluation_prompt_template_non_hr = f"""
426
  {level_specific_instructions_non_hr}
427
  {per_question_scoring_guidelines_non_hr}
 
705
  num_qs = st.sidebar.slider("Number of Questions:", 1, 10, 3)
706
 
707
  if st.session_state["selected_domain"] == "Soft Skills":
708
+ soft_skill_mode = st.sidebar.radio(
709
+ "Choose Soft Skills Mode:",
710
+ ("Resume-Based", "HR Round")
711
+ )
712
+ if soft_skill_mode == "Resume-Based":
713
+ uploaded_file = st.sidebar.file_uploader("Upload Resume:", type=["pdf", "txt"])
714
+ if uploaded_file:
715
+ input_text = extract_pdf_text(uploaded_file)
716
+ else:
717
+ input_text = "General HR Round"
718
+
719
  if st.sidebar.button("Generate Questions"):
720
+ if soft_skill_mode == "HR Round":
721
+ st.session_state["generated_questions"] = sample(hr_questions, num_qs)
722
+ else:
723
+ if not input_text.strip():
724
+ st.warning("⚠️ Please upload a resume.")
725
+ st.stop()
726
+ prompt = f"Ask {num_qs} HR-style interview questions based on this resume: {input_text}"
727
+ model = genai.GenerativeModel('gemini-1.5-pro-latest')
728
+ response = model.generate_content([prompt])
729
+ questions = [q.strip("* ") for q in response.text.strip().split("\n") if q.strip()]
730
+ st.session_state["generated_questions"] = questions[:num_qs]
731
+
732
  st.session_state["current_question_index"] = 0
733
  st.rerun()
734
+
735
  else:
736
+ section_choice = st.sidebar.radio(
737
+ "Choose Input Type:",
738
+ ("Resume", "Job Description", "Skills", "Company Specific") if st.session_state["selected_domain"] == "Finance" else ("Resume", "Job Description", "Skills")
739
+ )
740
+ #difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
741
  input_text = ""
742
 
743
  if section_choice == "Resume":
744
+ difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
745
  uploaded_file = st.sidebar.file_uploader("Upload Resume:", type=["pdf", "txt"])
746
  if uploaded_file:
747
  input_text = extract_pdf_text(uploaded_file)
748
 
749
  elif section_choice == "Job Description":
750
+ difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
751
  input_text = st.sidebar.text_area("Paste Job Description:")
752
 
753
  elif section_choice == "Skills":
754
+ difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
755
  input_text = ""
756
 
757
  if st.session_state["selected_domain"] == "Finance":
 
760
 
761
  difficulty = st.session_state.get("difficulty", "Beginner")
762
 
763
+ if selected_level == "Level-1":
764
+ excel_filename = "CIBOP Mock Questions.xlsx"
765
+ module_prefix = "MODULE 1"
766
+ elif selected_level == "Level-2":
767
+ excel_filename = "CIBOP Level2.xlsx"
768
+ module_prefix = "MODULE 2"
769
+ else:
770
  st.sidebar.warning(f"🚧 {selected_level} content is still under development. Please select Level-1 to continue.")
771
  st.stop()
772
 
773
  # Map difficulty level to column in Excel
774
  column_map = {
775
+ "Beginner": f"{module_prefix}-EASY",
776
+ "Intermediate": f"{module_prefix}-MEDIUM",
777
+ "Advanced": f"{module_prefix}-DIFFICULT"
778
  }
779
 
780
  selected_column = column_map[difficulty]
781
 
782
  # Load Excel and questions
783
+ excel_path = os.path.join("data", excel_filename)
784
  try:
785
  df = pd.read_excel(excel_path, engine="openpyxl")
786
  questions_from_excel = df[selected_column].dropna().astype(str).tolist()
 
791
 
792
  st.sidebar.success(f"✅ Loaded {difficulty}-level questions from {selected_level}")
793
 
794
+ elif section_choice == "Company Specific" and st.session_state["selected_domain"] == "Finance":
795
+ excel_path = os.path.join("data", "Company Specific.xlsx")
796
+ try:
797
+ # Load Excel and get sheet names (company names)
798
+ xls = pd.ExcelFile(excel_path, engine="openpyxl")
799
+ company_names = xls.sheet_names
800
+ except Exception as e:
801
+ st.sidebar.error(f"❌ Error loading company-specific Excel: {e}")
802
+ st.stop()
803
+
804
+ selected_company = st.sidebar.selectbox("Select Company:", company_names)
805
+
806
+ try:
807
+ # Load the selected company's sheet
808
+ df = pd.read_excel(excel_path, sheet_name=selected_company, engine="openpyxl")
809
+
810
+ if "Job Role" not in df.columns:
811
+ st.sidebar.error(f"❌ 'JobRole' column not found in sheet '{selected_company}'.")
812
+ st.stop()
813
+
814
+ job_roles = sorted(df["Job Role"].dropna().unique())
815
+ selected_job_role = st.sidebar.selectbox("Select Job Role:", job_roles)
816
+
817
+ filtered_df = df[df["Job Role"] == selected_job_role]
818
+
819
+ if "Question" in filtered_df.columns:
820
+ questions_from_excel = filtered_df["Question"].dropna().astype(str).tolist()
821
+ else:
822
+ question_cols = [col for col in filtered_df.columns if col != "JobRole"]
823
+ if not question_cols:
824
+ st.sidebar.error(f"❌ No question column found in '{selected_company}' sheet.")
825
+ st.stop()
826
+ questions_from_excel = filtered_df[question_cols[0]].dropna().astype(str).tolist()
827
+
828
+ input_text = f"{selected_company} - {selected_job_role}"
829
+ st.sidebar.success(f"✅ Loaded {len(questions_from_excel)} questions for {selected_company} / {selected_job_role}")
830
+
831
+ except Exception as e:
832
+ st.sidebar.error(f"❌ Error reading sheet '{selected_company}': {e}")
833
+ st.stop()
834
+
835
+ else:
836
+ # For Analytics or any other domain
837
+ skills = {
838
  "Analytics": ["Python", "SQL", "Machine Learning", "Statistics", "Business Analytics"]
839
  }
840
+ skill_list = skills.get(st.session_state["selected_domain"], [])
841
+ if skill_list:
842
+ selected_skill = st.sidebar.selectbox("Select a Skill:", skill_list, key="skill_select")
843
+ input_text = selected_skill
844
+ st.sidebar.markdown(f"✅ Selected Skill: **{selected_skill}**")
845
 
846
 
847
  if st.sidebar.button("Generate Questions"):
 
849
  st.warning("⚠️ Please provide input based on the selected method.")
850
  st.stop()
851
 
852
+ if st.session_state["selected_domain"] == "Finance" and section_choice in ["Skills","Company Specific"]:
853
  st.session_state["generated_questions"] = sample(questions_from_excel, min(num_qs, len(questions_from_excel)))
854
  else:
855
  prompt = f"Ask {num_qs} direct and core-level {difficulty} interview questions related to {input_text}. Do not include intros or numbering."