import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models.application import Application
from models.assessment import Assessment
from models.job import Job
from models.user import User
from models.base import Base
from config import settings
from services.application_service import calculate_application_score
from uuid import uuid4
def test_answer_handling():
    """End-to-end check that answers stored as JSON on the Application row
    (rather than as separate ORM models) are scored correctly.

    Exercises calculate_application_score against three scenarios:
      1. all multiple-choice answers correct      -> 100%
      2. one of two multiple-choice answers wrong -> 40% (2 of 5 weighted points)
      3. mixed MCQ + text question, all answered  -> 100%

    Raises AssertionError if any computed score deviates from the expected
    weighted percentage.
    """
    # ``check_same_thread`` is a SQLite-only connect argument; passing it to
    # any other DB-API driver raises, so only attach it for SQLite URLs.
    connect_args = (
        {"check_same_thread": False}
        if settings.database_url.startswith("sqlite")
        else {}
    )
    engine = create_engine(settings.database_url, connect_args=connect_args)
    TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    # Create tables if they don't exist
    Base.metadata.create_all(bind=engine)
    db = TestingSessionLocal()
    try:
        job = _seed_job(db)
        questions = _mcq_questions()
        assessment = _seed_assessment(db, job, questions)
        user = _seed_user(db)

        # Scenario 1: both MCQ answers correct -> full score.
        app_all = _submit_application(db, job, assessment, user, [
            {"question_id": "q1", "text": "", "options": ["b"]},  # correct
            {"question_id": "q2", "text": "", "options": ["b"]},  # correct
        ])
        calculated_score = calculate_application_score(db, app_all.id)
        print(f"Calculated score for application: {calculated_score}%")
        # Weights are 3 and 2; both earned -> 100%.
        expected_percentage = ((3 + 2) / (3 + 2)) * 100
        assert calculated_score == expected_percentage, \
            f"Expected {expected_percentage}%, got {calculated_score}%"
        print("[PASS] Score calculation is correct for all correct answers")

        # Scenario 2: q1 wrong, q2 right -> 2 earned of 5 total points = 40%.
        app_partial = _submit_application(db, job, assessment, user, [
            {"question_id": "q1", "text": "", "options": ["a"]},  # wrong
            {"question_id": "q2", "text": "", "options": ["b"]},  # correct
        ])
        calculated_score_partial = calculate_application_score(db, app_partial.id)
        print(f"Calculated score for partial application: {calculated_score_partial}%")
        expected_partial_percentage = (2 / 5) * 100  # 40%
        assert calculated_score_partial == expected_partial_percentage, \
            f"Expected {expected_partial_percentage}%, got {calculated_score_partial}%"
        print("[PASS] Score calculation is correct for partial correct answers")

        # Scenario 3: append a weight-5 text question. A text-based question
        # counts as correct when the answer has text content, so a fully
        # answered application should score 100% (10 of 10 points).
        assessment.questions = json.dumps(questions + [_text_question()])
        db.commit()
        app_text = _submit_application(db, job, assessment, user, [
            {"question_id": "q1", "text": "", "options": ["b"]},  # correct
            {"question_id": "q2", "text": "", "options": ["b"]},  # correct
            {
                "question_id": "q3",
                "text": "A list is mutable while a tuple is immutable.",
                "options": [],
            },
        ])
        calculated_score_text = calculate_application_score(db, app_text.id)
        print(f"Calculated score for application with text answer: {calculated_score_text}%")
        expected_text_percentage = ((3 + 2 + 5) / (3 + 2 + 5)) * 100  # 100%
        assert calculated_score_text == expected_text_percentage, \
            f"Expected {expected_text_percentage}%, got {calculated_score_text}%"
        print("[PASS] Score calculation is correct for application with text answer")

        print("\nAll tests passed! Answers are correctly handled without being treated as separate models.")
    finally:
        db.close()
        # The original leaked the connection pool; release it explicitly.
        engine.dispose()


def _seed_job(db):
    """Insert and return a throwaway Job row for the assessment under test."""
    job = Job(
        id=str(uuid4()),
        title="Software Engineer",
        seniority="mid",
        description="Test job for assessment",
        skill_categories='["programming", "python", "fastapi"]',
    )
    db.add(job)
    db.commit()
    return job


def _mcq_questions():
    """Return the two weighted multiple-choice questions (q1 weight 3, q2 weight 2)."""
    return [
        {
            "id": "q1",
            "text": "What is Python?",
            "weight": 3,
            "skill_categories": ["programming", "python"],
            "type": "choose_one",
            "options": [
                {"text": "A snake", "value": "a"},
                {"text": "A programming language", "value": "b"},
                {"text": "An IDE", "value": "c"},
            ],
            "correct_options": ["b"],
        },
        {
            "id": "q2",
            "text": "What is 2+2?",
            "weight": 2,
            "skill_categories": ["math"],
            "type": "choose_one",
            "options": [
                {"text": "3", "value": "a"},
                {"text": "4", "value": "b"},
                {"text": "5", "value": "c"},
            ],
            "correct_options": ["b"],
        },
    ]


def _text_question():
    """Return the weight-5 free-text question (no options, no correct_options)."""
    return {
        "id": "q3",
        "text": "Describe the difference between list and tuple in Python.",
        "weight": 5,
        "skill_categories": ["python"],
        "type": "text_based",
        "options": [],
        "correct_options": [],
    }


def _seed_assessment(db, job, questions):
    """Insert and return an Assessment for *job* with *questions* serialized to JSON."""
    assessment = Assessment(
        id=str(uuid4()),
        job_id=job.id,
        title="Programming Skills Assessment",
        passing_score=70,
        questions=json.dumps(questions),
    )
    db.add(assessment)
    db.commit()
    return assessment


def _seed_user(db):
    """Insert and return an applicant User with a randomized unique email."""
    user = User(
        id=str(uuid4()),
        first_name="John",
        last_name="Doe",
        email=f"test_{str(uuid4())[:8]}@example.com",
        role="applicant",
    )
    user.set_password("password123")
    db.add(user)
    db.commit()
    return user


def _submit_application(db, job, assessment, user, answers):
    """Insert and return an Application carrying *answers* as a JSON string."""
    application = Application(
        id=str(uuid4()),
        job_id=job.id,
        assessment_id=assessment.id,
        user_id=user.id,
        answers=json.dumps(answers),
    )
    db.add(application)
    db.commit()
    return application
# Allow running this check directly as a standalone script.
if __name__ == "__main__":
    test_answer_handling()