Kaadan committed on
Commit
1bf457e
·
2 Parent(s): 9bd6c72 b04c4ca

Merge branch 'master'

Browse files
backend/Dockerfile CHANGED
@@ -31,5 +31,10 @@ RUN chmod +x start.sh
31
  # Expose the port the app runs on
32
  EXPOSE 8000
33
 
34
- # Run the application (with migrations first)
35
- CMD ["./start.sh"]
 
 
 
 
 
 
31
  # Expose the port the app runs on
32
  EXPOSE 8000
33
 
34
+ # Create an entrypoint script to handle database seeding
35
+ COPY docker-entrypoint.sh /usr/local/bin/
36
+ RUN chmod +x /usr/local/bin/docker-entrypoint.sh
37
+
38
+ # Run the application
39
+ ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
40
+ CMD ["python", "main.py"]
backend/alembic/versions/c5ca6901e3cd_seed_applications_with_realistic_data.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Seed applications with realistic data
2
+
3
+ Revision ID: c5ca6901e3cd
4
+ Revises: 290ee4ce077e
5
+ Create Date: 2026-02-09 12:36:16.911612
6
+
7
+ """
8
+ from typing import Sequence, Union
9
+
10
+ from alembic import op
11
+ import sqlalchemy as sa
12
+
13
+
14
+ # revision identifiers, used by Alembic.
15
+ revision: str = 'c5ca6901e3cd'
16
+ down_revision: Union[str, Sequence[str], None] = '290ee4ce077e'
17
+ branch_labels: Union[str, Sequence[str], None] = None
18
+ depends_on: Union[str, Sequence[str], None] = None
19
+
20
+
21
def upgrade() -> None:
    """Upgrade schema.

    Deliberately a no-op: the schema changes were already applied by the
    previous migration, and application rows were seeded by a separate
    script, so this revision only records the step in Alembic's history
    without touching the database.
    """
29
+
30
+
31
def downgrade() -> None:
    """Downgrade schema.

    Nothing to undo — the matching ``upgrade`` makes no schema or data
    changes, so the downgrade is an intentional no-op as well.
    """
backend/db_seeder.py ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module to handle database seeding for the application.
3
+ This module provides functionality to seed the database with initial data
4
+ when the application starts for the first time.
5
+ """
6
+
7
+ import logging
8
+ from sqlalchemy import inspect
9
+ from models.user import User
10
+ from models.job import Job
11
+ from models.assessment import Assessment
12
+ from services.user_service import create_user
13
+ from services.job_service import create_job
14
+ from services.assessment_service import create_assessment
15
+ from services.application_service import create_application
16
+ from schemas.user import UserCreate
17
+ from schemas.job import JobCreate
18
+ from schemas.assessment import AssessmentCreate
19
+ from schemas.application import ApplicationCreate, ApplicationAnswer
20
+ from schemas.enums import UserRole, JobSeniority, QuestionType
21
+ from passlib.context import CryptContext
22
+ import json
23
+
24
+ # Create logger for this module
25
+ logger = logging.getLogger(__name__)
26
+
27
+ # Password hashing context
28
+ pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
29
+
30
def hash_password(password: str) -> str:
    """Return the bcrypt hash of ``password`` via the module-wide context.

    Delegates to the shared ``pwd_context`` so hashing parameters stay
    consistent across the application.
    """
    hashed = pwd_context.hash(password)
    return hashed
33
+
34
def check_if_seeded(db) -> bool:
    """Check if the database has already been seeded with initial data.

    Args:
        db: An open SQLAlchemy session bound to the application database.

    Returns:
        True when at least one ``User`` row exists. The presence of any
        user is treated as the marker that seeding already ran.
    """
    # If we have users, we consider the DB as seeded.
    # (In a real-world scenario you might want a more sophisticated
    # check, e.g. a dedicated seed-marker table.)
    # Fix: removed an unused `inspector = inspect(db.bind)` local — it was
    # never read and only cost a metadata reflection call.
    user_count = db.query(User).count()

    return user_count > 0
44
+
45
def seed_database():
    """Seed the database with initial demo data if it hasn't been seeded yet.

    Creates, in order: HR users, candidate users, sample jobs, one assessment
    per job, and sample applications with generated answers.  The entire run
    is skipped when ``check_if_seeded`` reports existing users, so calling
    this on every startup is safe.  On any error the session is rolled back
    and the exception re-raised; the session is always closed.
    """
    from database.database import SessionLocal

    # Create tables if they don't exist
    from models.base import Base
    from database.database import engine
    Base.metadata.create_all(bind=engine)

    db = SessionLocal()

    try:
        # Check if database is already seeded
        if check_if_seeded(db):
            logger.info("Database already seeded. Skipping seeding process.")
            return

        logger.info("Database is not seeded. Starting seeding process...")

        # Create HR users.
        # NOTE(review): fixed demo credentials ("password123") — acceptable
        # for a demo seed only; must never reach a production environment.
        hr_users_data = [
            {
                "first_name": "Sarah",
                "last_name": "Johnson",
                "email": "sarah.johnson@demo.com",
                "password": "password123",
                "role": UserRole.hr
            },
            {
                "first_name": "Michael",
                "last_name": "Chen",
                "email": "michael.chen@demo.com",
                "password": "password123",
                "role": UserRole.hr
            },
            {
                "first_name": "Emma",
                "last_name": "Rodriguez",
                "email": "emma.rodriguez@demo.com",
                "password": "password123",
                "role": UserRole.hr
            },
            {
                "first_name": "David",
                "last_name": "Wilson",
                "email": "david.wilson@demo.com",
                "password": "password123",
                "role": UserRole.hr
            }
        ]

        hr_users = []
        for user_data in hr_users_data:
            user_create = UserCreate(
                first_name=user_data["first_name"],
                last_name=user_data["last_name"],
                email=user_data["email"],
                password=user_data["password"],
                role=user_data["role"]
            )
            user = create_user(db, user_create)
            hr_users.append(user)
            logger.info(f"Created HR user: {user.email}")

        # Create candidate users
        candidate_users_data = [
            {
                "first_name": "Alex",
                "last_name": "Thompson",
                "email": "alex.thompson@demo.com",
                "password": "password123",
                "role": UserRole.applicant
            },
            {
                "first_name": "Jessica",
                "last_name": "Lee",
                "email": "jessica.lee@demo.com",
                "password": "password123",
                "role": UserRole.applicant
            },
            {
                "first_name": "Ryan",
                "last_name": "Patel",
                "email": "ryan.patel@demo.com",
                "password": "password123",
                "role": UserRole.applicant
            },
            {
                "first_name": "Olivia",
                "last_name": "Kim",
                "email": "olivia.kim@demo.com",
                "password": "password123",
                "role": UserRole.applicant
            }
        ]

        candidate_users = []
        for user_data in candidate_users_data:
            user_create = UserCreate(
                first_name=user_data["first_name"],
                last_name=user_data["last_name"],
                email=user_data["email"],
                password=user_data["password"],
                role=user_data["role"]
            )
            user = create_user(db, user_create)
            candidate_users.append(user)
            logger.info(f"Created candidate user: {user.email}")

        # Create sample jobs
        jobs_data = [
            {
                "title": "Software Engineer",
                "seniority": JobSeniority.mid,
                "description": "Develop and maintain software applications using modern technologies.",
                "skill_categories": ["python", "javascript", "sql", "rest-api"]
            },
            {
                "title": "Senior Python Developer",
                "seniority": JobSeniority.senior,
                "description": "Lead development of complex Python applications and mentor junior developers.",
                "skill_categories": ["python", "django", "flask", "postgresql", "aws"]
            },
            {
                "title": "Junior Data Analyst",
                "seniority": JobSeniority.junior,
                "description": "Analyze data to provide insights and support business decisions.",
                "skill_categories": ["sql", "python", "excel", "data-visualization"]
            },
            {
                "title": "DevOps Engineer",
                "seniority": JobSeniority.mid,
                "description": "Manage infrastructure and deployment pipelines.",
                "skill_categories": ["docker", "kubernetes", "aws", "ci-cd", "linux"]
            }
        ]

        jobs = []
        for job_data in jobs_data:
            job_create = JobCreate(
                title=job_data["title"],
                seniority=job_data["seniority"],
                description=job_data["description"],
                skill_categories=job_data["skill_categories"]
            )
            job = create_job(db, job_create)
            jobs.append(job)
            logger.info(f"Created job: {job.title}")

        # Create assessments for each job.
        # Templates are fewer than jobs, so they are reused round-robin below.
        assessment_templates = [
            {
                "title": "Programming Skills Assessment",
                "passing_score": 60,
                "questions_types": [QuestionType.choose_one, QuestionType.text_based]
            },
            {
                "title": "Data Analysis Skills Assessment",
                "passing_score": 65,
                "questions_types": [QuestionType.choose_one, QuestionType.choose_many, QuestionType.text_based]
            },
            {
                "title": "DevOps Practices Assessment",
                "passing_score": 70,
                "questions_types": [QuestionType.choose_one, QuestionType.text_based]
            }
        ]

        assessments = []
        for i, job in enumerate(jobs):
            # Cycle through the templates when there are more jobs than templates.
            template = assessment_templates[i % len(assessment_templates)]
            assessment_create = AssessmentCreate(
                title=template["title"],
                passing_score=template["passing_score"],
                questions_types=template["questions_types"]
            )
            assessment = create_assessment(db, job.id, assessment_create)
            assessments.append(assessment)
            logger.info(f"Created assessment: {assessment.title} for job {job.title}")

        # Create some sample applications with realistic answers and scores.
        # For each assessment, create applications from different candidates.
        for i, assessment in enumerate(assessments):
            # Get the job for this assessment
            job = next((j for j in jobs if j.id == assessment.job_id), None)
            if not job:
                continue

            # Get questions from the assessment.
            # assumes assessment.questions is a JSON-encoded list of question
            # dicts with "id", "type", "text", "options", "correct_options"
            # keys, as indexed below — TODO confirm against the assessment model.
            try:
                questions = json.loads(assessment.questions) if assessment.questions else []
            except json.JSONDecodeError:
                logger.warning(f"Could not parse questions for assessment {assessment.id}")
                continue

            if not questions:
                continue

            # Select a few candidate users to apply for this job
            selected_candidates = candidate_users[:3]  # Use first 3 candidates

            for j, user in enumerate(selected_candidates):
                # Generate answers for each question
                answers = []

                for k, question in enumerate(questions):
                    # Base answer shape; one of "text"/"options" is filled in
                    # depending on the question type.
                    answer = {
                        "question_id": question["id"],
                        "text": None,
                        "options": []
                    }

                    if question["type"] == "choose_one":
                        # For multiple choice questions, vary correctness.
                        if j == 0:
                            # NOTE(review): the original intent was "first
                            # candidate gets some wrong", but this branch
                            # always picks the first correct option — same
                            # outcome as candidate 3.
                            answer["options"] = [question.get("correct_options", [])[0]] if question.get("correct_options") else []
                        elif j == 1:  # Second candidate gets them mostly right
                            if k % 2 == 0:  # Alternate correct/wrong
                                answer["options"] = [question.get("correct_options", [])[0]] if question.get("correct_options") else []
                            else:
                                # Pick a wrong answer
                                all_options = [opt["value"] for opt in question.get("options", [])]
                                correct_options = set(question.get("correct_options", []))
                                wrong_options = [opt for opt in all_options if opt not in correct_options]
                                if wrong_options:
                                    answer["options"] = [wrong_options[0]]
                                else:
                                    answer["options"] = [all_options[0]] if all_options else []
                        else:  # Third candidate gets them right
                            answer["options"] = [question.get("correct_options", [])[0]] if question.get("correct_options") else []

                    elif question["type"] == "choose_many":
                        # For multiple select questions
                        if j == 0:  # First candidate gets them wrong (no selection)
                            answer["options"] = []
                        elif j == 1:  # Second candidate gets them partially right
                            if k % 2 == 0:
                                answer["options"] = question.get("correct_options", [])
                            else:
                                # Just one correct option.
                                # NOTE(review): `all_options` is computed but
                                # unused in this branch.
                                all_options = [opt["value"] for opt in question.get("options", [])]
                                correct_options = set(question.get("correct_options", []))
                                answer["options"] = list(correct_options)[:1]  # Just one correct option
                        else:  # Third candidate gets them right
                            answer["options"] = question.get("correct_options", [])

                    elif question["type"] == "text_based":
                        # For text-based questions, generate more realistic
                        # canned answers keyed off keywords in the question text.
                        question_text_lower = question["text"].lower()

                        if "python" in question_text_lower:
                            if "framework" in question_text_lower or "web" in question_text_lower:
                                answer["text"] = "Python frameworks like Django and Flask are commonly used for web development. Django is a high-level framework that encourages rapid development and clean design."
                            elif "difference" in question_text_lower or "between" in question_text_lower:
                                answer["text"] = "The main differences between lists and tuples in Python are mutability. Lists are mutable (can be changed after creation) while tuples are immutable (cannot be changed after creation). Lists use square brackets [] while tuples use parentheses ()."
                            elif "advantage" in question_text_lower or "benefit" in question_text_lower:
                                answer["text"] = "One advantage of Python is its readability and simplicity. The syntax is clean and easy to understand, making it great for beginners. It also has a large ecosystem of libraries and frameworks."
                            else:
                                answer["text"] = f"Regarding the question '{question['text']}', this is a typical Python concept that involves understanding data structures, syntax, or best practices."
                        elif "database" in question_text_lower or "sql" in question_text_lower:
                            answer["text"] = "SQL databases are relational and use structured query language for defining and manipulating data. They're ideal for complex queries and ACID transactions. Common examples include PostgreSQL, MySQL, and SQLite."
                        elif "api" in question_text_lower:
                            answer["text"] = "An API (Application Programming Interface) is a set of rules that allows programs to talk to each other. REST APIs use HTTP requests to GET, PUT, POST and DELETE data. They're stateless and cacheable."
                        else:
                            answer["text"] = f"This is my response to the question: {question['text']}. It requires understanding of the relevant concepts and applying best practices."

                    answers.append(ApplicationAnswer(**answer))

                # Create the application
                application_create = ApplicationCreate(
                    job_id=job.id,
                    assessment_id=assessment.id,
                    user_id=user.id,
                    answers=answers
                )

                application = create_application(db, application_create)
                logger.info(f"Created application for user {user.email} on job '{job.title}' with assessment '{assessment.title}'")

        logger.info("Database seeding completed successfully!")

    except Exception as e:
        # Any failure aborts the whole seed: roll back and propagate.
        logger.error(f"Error during database seeding: {e}")
        db.rollback()
        raise
    finally:
        db.close()
backend/docker-compose.yml CHANGED
@@ -22,6 +22,7 @@ services:
22
  - APP_NAME=AI-Powered Hiring Assessment Platform
23
  - APP_VERSION=0.1.0
24
  - APP_DESCRIPTION=MVP for managing hiring assessments using AI
 
25
  networks:
26
  - app-network
27
 
 
22
  - APP_NAME=AI-Powered Hiring Assessment Platform
23
  - APP_VERSION=0.1.0
24
  - APP_DESCRIPTION=MVP for managing hiring assessments using AI
25
+ - MISTRAL_API_KEY=${MISTRAL_API_KEY}
26
  networks:
27
  - app-network
28
 
backend/docker-entrypoint.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Container entrypoint: seed the database, then hand off to the image CMD.
set -e

# Run database seeding.
# Fix: db_seeder.py defines seed_database() but has no
# `if __name__ == "__main__"` guard, so a bare `python db_seeder.py`
# only imported the module and seeded nothing. Invoke the function
# explicitly; it is safe to run on every start because it skips
# seeding when users already exist.
echo "Running database seeding..."
python -c "from db_seeder import seed_database; seed_database()"

# Then exec the container's main process (what's specified as CMD in the Dockerfile)
exec "$@"
backend/main.py CHANGED
@@ -5,6 +5,7 @@ from fastapi.middleware.cors import CORSMiddleware
5
  from fastapi.staticfiles import StaticFiles
6
  from fastapi.responses import FileResponse
7
  import logging
 
8
 
9
  from database.database import engine
10
  from models import Base
@@ -14,23 +15,38 @@ from api.job_routes import router as job_router
14
  from api.assessment_routes import router as assessment_router
15
  from api.application_routes import router as application_router
16
  from config import settings
 
 
17
 
18
- # ----------------------------
19
- # Logging configuration
20
- # ----------------------------
21
  logging.basicConfig(level=settings.log_level)
22
  logger = logging.getLogger(__name__)
 
23
 
24
- # ----------------------------
25
- # Environment variables
26
- # ----------------------------
27
  PORT = int(os.environ.get("PORT", 7860))
28
  HOST = os.environ.get("HOST", "0.0.0.0")
29
  DEBUG = os.getenv("DEBUG", str(settings.debug)).lower() == "true"
30
 
31
- # ----------------------------
32
- # Initialize FastAPI
33
- # ----------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  app = FastAPI(
35
  title=settings.app_name,
36
  description=settings.app_description,
 
5
  from fastapi.staticfiles import StaticFiles
6
  from fastapi.responses import FileResponse
7
  import logging
8
+ from contextlib import asynccontextmanager
9
 
10
  from database.database import engine
11
  from models import Base
 
15
  from api.assessment_routes import router as assessment_router
16
  from api.application_routes import router as application_router
17
  from config import settings
18
+ from logging_config import get_logger
19
+ from db_seeder import seed_database
20
 
21
+ # Create logger for this module
 
 
22
  logging.basicConfig(level=settings.log_level)
23
  logger = logging.getLogger(__name__)
24
+ logger = get_logger(__name__)
25
 
 
 
 
26
  PORT = int(os.environ.get("PORT", 7860))
27
  HOST = os.environ.get("HOST", "0.0.0.0")
28
  DEBUG = os.getenv("DEBUG", str(settings.debug)).lower() == "true"
29
 
30
+
31
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Handle startup and shutdown events.

    Passed to FastAPI as the ``lifespan`` context manager: everything
    before ``yield`` runs once at startup, everything after runs once
    at shutdown.
    """
    # Startup
    logger.info(f"Starting {settings.app_name} v{settings.app_version}")
    # NOTE(review): this logs the full database URL — if the URL embeds
    # credentials they end up in the logs; confirm it is safe to log.
    logger.info(f"Database URL: {settings.database_url}")

    # Seed the database if it's empty (seed_database skips itself when
    # users already exist, so this is safe on every start)
    logger.info("Checking if database needs seeding...")
    seed_database()
    logger.info("Database seeding check completed")

    logger.info("Application started successfully")

    yield
    # Shutdown
    logger.info("Application shutting down")
+
49
+ # Initialize FastAPI app with settings
50
  app = FastAPI(
51
  title=settings.app_name,
52
  description=settings.app_description,
backend/services/ai_service.py CHANGED
@@ -2,12 +2,29 @@ import re
2
  from typing import List, Dict, Any
3
  from schemas.assessment import AssessmentQuestion
4
  from schemas.application import ApplicationAnswerWithQuestion
5
- from integrations.ai_integration.ai_factory import AIGeneratorFactory, DEFAULT_PROVIDER
6
  from logging_config import get_logger
 
7
 
8
  # Create logger for this module
9
  logger = get_logger(__name__)
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  def generate_questions(title: str, questions_types: List[str], additional_note: str = None, job_info: dict = None, provider=None) -> List[AssessmentQuestion]:
12
  """
13
  Generate questions based on the assessment title, job information, and specified question types.
@@ -17,16 +34,16 @@ def generate_questions(title: str, questions_types: List[str], additional_note:
17
  questions_types: List of question types to generate (choose_one, choose_many, text_based)
18
  additional_note: Additional information to guide question generation
19
  job_info: Information about the job the assessment is for
20
- provider: The AI provider to use (defaults to the default provider)
21
 
22
  Returns:
23
  List of generated AssessmentQuestion objects
24
  """
25
  logger.info(f"Generating questions for assessment: '{title}' with types: {questions_types}")
26
 
27
- # Use the default provider if none is specified
28
  if provider is None:
29
- provider = DEFAULT_PROVIDER
30
 
31
  # Get the AI generator from the factory
32
  ai_generator = AIGeneratorFactory.create_generator(provider)
@@ -50,7 +67,7 @@ def score_answer(question: AssessmentQuestion, answer_text: str, selected_option
50
  question: The question being answered
51
  answer_text: The text of the answer (for text-based questions)
52
  selected_options: Selected options (for multiple choice questions)
53
- provider: The AI provider to use (defaults to the default provider)
54
 
55
  Returns:
56
  Dictionary containing score information:
@@ -60,11 +77,11 @@ def score_answer(question: AssessmentQuestion, answer_text: str, selected_option
60
  'correct': bool # Whether the answer is correct
61
  }
62
  """
63
- logger.info(f"Scoring answer for question: '{question.text[:50]}...' using {provider.value if provider else DEFAULT_PROVIDER.value} provider")
64
 
65
- # Use the default provider if none is specified
66
  if provider is None:
67
- provider = DEFAULT_PROVIDER
68
 
69
  # Get the AI generator from the factory
70
  ai_generator = AIGeneratorFactory.create_generator(provider)
@@ -88,16 +105,16 @@ def estimate_assessment_duration(title: str, job_info: dict, questions: List[Ass
88
  job_info: Information about the job the assessment is for
89
  questions: List of questions in the assessment
90
  additional_note: Additional information about the assessment
91
- provider: The AI provider to use (defaults to the default provider)
92
 
93
  Returns:
94
  Estimated duration in minutes
95
  """
96
  logger.info(f"Estimating duration for assessment: '{title}' with {len(questions)} questions")
97
 
98
- # Use the default provider if none is specified
99
  if provider is None:
100
- provider = DEFAULT_PROVIDER
101
 
102
  # Get the AI generator from the factory
103
  ai_generator = AIGeneratorFactory.create_generator(provider)
@@ -142,11 +159,11 @@ especially for senior roles or text-based questions requiring detailed responses
142
  duration_match = re.search(r'\d+', duration_estimate)
143
  if duration_match:
144
  duration_minutes = int(duration_match.group())
145
-
146
  # Apply our own logic to ensure minimum duration per question and adjust based on job seniority
147
  # Calculate base duration (at least 2 minutes per question)
148
  base_duration = len(questions) * 2
149
-
150
  # Adjust based on job seniority
151
  seniority = job_info.get('seniority', '').lower()
152
  if seniority in ['senior', 'lead']:
@@ -154,16 +171,16 @@ especially for senior roles or text-based questions requiring detailed responses
154
  elif seniority in ['mid', 'intermediate']:
155
  base_duration = int(base_duration * 1.2) # 20% more time for mid-level roles
156
  # Junior/intern roles get the base time (2 min per question)
157
-
158
  # Adjust based on question complexity (text-based questions take more time)
159
  text_questions = sum(1 for q in questions if q.type == 'text_based')
160
  if text_questions > 0:
161
  # Add extra time for text-based questions (they typically take longer)
162
  base_duration += text_questions * 2 # Additional 2 minutes per text question
163
-
164
  # Take the maximum of AI estimation and our calculated minimum
165
  duration_minutes = max(duration_minutes, base_duration)
166
-
167
  # Ensure the duration is within reasonable bounds (5-180 minutes)
168
  duration_minutes = max(5, min(180, duration_minutes))
169
  logger.info(f"Estimated duration for assessment '{title}': {duration_minutes} minutes (AI: {int(duration_match.group())}, calculated min: {base_duration})")
@@ -172,21 +189,21 @@ especially for senior roles or text-based questions requiring detailed responses
172
  # If no number is found in the response, return a default duration based on question count
173
  # Calculate base duration (at least 2 minutes per question)
174
  base_duration = len(questions) * 2
175
-
176
  # Adjust based on job seniority
177
  seniority = job_info.get('seniority', '').lower()
178
  if seniority in ['senior', 'lead']:
179
  base_duration = int(base_duration * 1.5) # 50% more time for senior roles
180
  elif seniority in ['mid', 'intermediate']:
181
  base_duration = int(base_duration * 1.2) # 20% more time for mid-level roles
182
-
183
  # Adjust based on question complexity (text-based questions take more time)
184
  text_questions = sum(1 for q in questions if q.type == 'text_based')
185
  if text_questions > 0:
186
  base_duration += text_questions * 2 # Additional 2 minutes per text question
187
-
188
  # Ensure minimum duration is at least 5 minutes
189
  default_duration = max(5, base_duration)
190
-
191
  logger.warning(f"No duration found in AI response. Using calculated default: {default_duration} minutes")
192
  return default_duration
 
2
  from typing import List, Dict, Any
3
  from schemas.assessment import AssessmentQuestion
4
  from schemas.application import ApplicationAnswerWithQuestion
5
+ from integrations.ai_integration.ai_factory import AIGeneratorFactory, AIProvider, DEFAULT_PROVIDER
6
  from logging_config import get_logger
7
+ from config import settings
8
 
9
  # Create logger for this module
10
  logger = get_logger(__name__)
11
 
12
def get_current_provider():
    """Resolve the AI provider configured in application settings.

    Returns:
        The ``AIProvider`` member matching ``settings.ai_provider``
        (case-insensitive), or ``DEFAULT_PROVIDER`` when the setting is
        absent, not a string, or not a recognized provider name.
    """
    # Map string values to enum members.
    provider_map = {
        'mock': AIProvider.MOCK,
        'openai': AIProvider.OPENAI,
        'anthropic': AIProvider.ANTHROPIC,
        'google': AIProvider.GOOGLE,
        'mistral': AIProvider.MISTRAL
    }
    try:
        provider_value = settings.ai_provider.lower()
    except AttributeError:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. AttributeError is what a missing or
        # non-string `settings.ai_provider` actually raises here.
        return DEFAULT_PROVIDER
    # Unrecognized names fall back to the default provider.
    return provider_map.get(provider_value, DEFAULT_PROVIDER)
27
+
28
  def generate_questions(title: str, questions_types: List[str], additional_note: str = None, job_info: dict = None, provider=None) -> List[AssessmentQuestion]:
29
  """
30
  Generate questions based on the assessment title, job information, and specified question types.
 
34
  questions_types: List of question types to generate (choose_one, choose_many, text_based)
35
  additional_note: Additional information to guide question generation
36
  job_info: Information about the job the assessment is for
37
+ provider: The AI provider to use (defaults to the provider from settings)
38
 
39
  Returns:
40
  List of generated AssessmentQuestion objects
41
  """
42
  logger.info(f"Generating questions for assessment: '{title}' with types: {questions_types}")
43
 
44
+ # Use the provider from settings if none is specified
45
  if provider is None:
46
+ provider = get_current_provider()
47
 
48
  # Get the AI generator from the factory
49
  ai_generator = AIGeneratorFactory.create_generator(provider)
 
67
  question: The question being answered
68
  answer_text: The text of the answer (for text-based questions)
69
  selected_options: Selected options (for multiple choice questions)
70
+ provider: The AI provider to use (defaults to the provider from settings)
71
 
72
  Returns:
73
  Dictionary containing score information:
 
77
  'correct': bool # Whether the answer is correct
78
  }
79
  """
80
+ logger.info(f"Scoring answer for question: '{question.text[:50]}...' using {provider.value if provider else get_current_provider().value} provider")
81
 
82
+ # Use the provider from settings if none is specified
83
  if provider is None:
84
+ provider = get_current_provider()
85
 
86
  # Get the AI generator from the factory
87
  ai_generator = AIGeneratorFactory.create_generator(provider)
 
105
  job_info: Information about the job the assessment is for
106
  questions: List of questions in the assessment
107
  additional_note: Additional information about the assessment
108
+ provider: The AI provider to use (defaults to the provider from settings)
109
 
110
  Returns:
111
  Estimated duration in minutes
112
  """
113
  logger.info(f"Estimating duration for assessment: '{title}' with {len(questions)} questions")
114
 
115
+ # Use the provider from settings if none is specified
116
  if provider is None:
117
+ provider = get_current_provider()
118
 
119
  # Get the AI generator from the factory
120
  ai_generator = AIGeneratorFactory.create_generator(provider)
 
159
  duration_match = re.search(r'\d+', duration_estimate)
160
  if duration_match:
161
  duration_minutes = int(duration_match.group())
162
+
163
  # Apply our own logic to ensure minimum duration per question and adjust based on job seniority
164
  # Calculate base duration (at least 2 minutes per question)
165
  base_duration = len(questions) * 2
166
+
167
  # Adjust based on job seniority
168
  seniority = job_info.get('seniority', '').lower()
169
  if seniority in ['senior', 'lead']:
 
171
  elif seniority in ['mid', 'intermediate']:
172
  base_duration = int(base_duration * 1.2) # 20% more time for mid-level roles
173
  # Junior/intern roles get the base time (2 min per question)
174
+
175
  # Adjust based on question complexity (text-based questions take more time)
176
  text_questions = sum(1 for q in questions if q.type == 'text_based')
177
  if text_questions > 0:
178
  # Add extra time for text-based questions (they typically take longer)
179
  base_duration += text_questions * 2 # Additional 2 minutes per text question
180
+
181
  # Take the maximum of AI estimation and our calculated minimum
182
  duration_minutes = max(duration_minutes, base_duration)
183
+
184
  # Ensure the duration is within reasonable bounds (5-180 minutes)
185
  duration_minutes = max(5, min(180, duration_minutes))
186
  logger.info(f"Estimated duration for assessment '{title}': {duration_minutes} minutes (AI: {int(duration_match.group())}, calculated min: {base_duration})")
 
189
  # If no number is found in the response, return a default duration based on question count
190
  # Calculate base duration (at least 2 minutes per question)
191
  base_duration = len(questions) * 2
192
+
193
  # Adjust based on job seniority
194
  seniority = job_info.get('seniority', '').lower()
195
  if seniority in ['senior', 'lead']:
196
  base_duration = int(base_duration * 1.5) # 50% more time for senior roles
197
  elif seniority in ['mid', 'intermediate']:
198
  base_duration = int(base_duration * 1.2) # 20% more time for mid-level roles
199
+
200
  # Adjust based on question complexity (text-based questions take more time)
201
  text_questions = sum(1 for q in questions if q.type == 'text_based')
202
  if text_questions > 0:
203
  base_duration += text_questions * 2 # Additional 2 minutes per text question
204
+
205
  # Ensure minimum duration is at least 5 minutes
206
  default_duration = max(5, base_duration)
207
+
208
  logger.warning(f"No duration found in AI response. Using calculated default: {default_duration} minutes")
209
  return default_duration