Kaadan committed on
Commit
b67657a
·
1 Parent(s): cfe866a

Suggest assessment duration via AI

Browse files
backend/integrations/ai_integration/ai_generator_interface.py CHANGED
@@ -54,4 +54,20 @@ class AIGeneratorInterface(ABC):
54
  'correct': bool # Whether the answer is correct
55
  }
56
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  pass
 
54
  'correct': bool # Whether the answer is correct
55
  }
56
  """
57
+ pass
58
+
59
@abstractmethod
def estimate_duration(self, prompt: str) -> str:
    """Ask the AI provider how long an assessment should take.

    Args:
        prompt: Free-form description of the assessment whose
            completion time should be estimated.

    Returns:
        The raw text of the AI response; it is expected to contain
        the estimated duration (in minutes) somewhere in the text.
    """
    ...
backend/integrations/ai_integration/anthropic_generator.py CHANGED
@@ -36,4 +36,21 @@ class AnthropicGenerator(AIGeneratorInterface):
36
  """
37
  # In a real implementation, this would call the Anthropic API
38
  # For now, we'll raise an exception indicating it's not implemented
39
- raise NotImplementedError("Anthropic answer scoring not yet implemented")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  """
37
  # In a real implementation, this would call the Anthropic API
38
  # For now, we'll raise an exception indicating it's not implemented
39
+ raise NotImplementedError("Anthropic answer scoring not yet implemented")
40
+
41
def estimate_duration(self, prompt: str) -> str:
    """Estimate an assessment's duration via the Anthropic API (stub).

    Args:
        prompt: Detailed description of the assessment to be timed.

    Returns:
        Would return the raw AI reply containing the estimate.

    Raises:
        NotImplementedError: always — the Anthropic backend is not wired up yet.
    """
    # Placeholder until the real Anthropic API integration lands.
    raise NotImplementedError("Anthropic duration estimation not yet implemented")
backend/integrations/ai_integration/google_ai_generator.py CHANGED
@@ -36,4 +36,21 @@ class GoogleAIGenerator(AIGeneratorInterface):
36
  """
37
  # In a real implementation, this would call the Google AI API
38
  # For now, we'll raise an exception indicating it's not implemented
39
- raise NotImplementedError("Google AI answer scoring not yet implemented")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  """
37
  # In a real implementation, this would call the Google AI API
38
  # For now, we'll raise an exception indicating it's not implemented
39
+ raise NotImplementedError("Google AI answer scoring not yet implemented")
40
+
41
def estimate_duration(self, prompt: str) -> str:
    """Estimate an assessment's duration via the Google AI API (stub).

    Args:
        prompt: Detailed description of the assessment to be timed.

    Returns:
        Would return the raw AI reply containing the estimate.

    Raises:
        NotImplementedError: always — the Google AI backend is not wired up yet.
    """
    # Placeholder until the real Google AI API integration lands.
    raise NotImplementedError("Google AI duration estimation not yet implemented")
backend/integrations/ai_integration/mistral_generator.py CHANGED
@@ -278,3 +278,30 @@ Return ONLY the JSON array.
278
  assessment_questions.append(question)
279
 
280
  return assessment_questions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
278
  assessment_questions.append(question)
279
 
280
  return assessment_questions
281
+
282
def estimate_duration(self, prompt: str) -> str:
    """Ask the Mistral chat API how many minutes an assessment needs.

    Args:
        prompt: Detailed description of the assessment to be timed.

    Returns:
        The raw text of the model's reply; expected to be a bare number
        of minutes per the system instruction.
    """
    system_msg = {
        "role": "system",
        "content": "You estimate assessment durations. Respond with only a number representing minutes.",
    }
    user_msg = {"role": "user", "content": prompt}

    # Low temperature keeps the numeric estimate stable across calls.
    completion = self.client.chat.complete(
        model="mistral-small-latest",
        messages=[system_msg, user_msg],
        temperature=0.2,
    )

    return completion.choices[0].message.content
backend/integrations/ai_integration/mock_ai_generator.py CHANGED
@@ -375,4 +375,32 @@ class MockAIGenerator(AIGeneratorInterface):
375
  score += 0.2
376
 
377
  # Cap the score at 1.0
378
- return min(score, 1.0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
375
  score += 0.2
376
 
377
  # Cap the score at 1.0
378
+ return min(score, 1.0)
379
+
380
+ def estimate_duration(
381
+ self,
382
+ prompt: str
383
+ ) -> str:
384
+ """
385
+ Estimate the duration for an assessment based on a prompt.
386
+
387
+ Args:
388
+ prompt: A detailed prompt describing the assessment
389
+
390
+ Returns:
391
+ String response from the AI containing the estimated duration
392
+ """
393
+ # For the mock implementation, we'll return a simple response with a number
394
+ # based on the length of the prompt and keywords
395
+ import random
396
+
397
+ # Count the number of questions mentioned in the prompt
398
+ question_count = prompt.count("Question ") # Count occurrences of "Question "
399
+
400
+ # Estimate duration based on question count (3 minutes per question, with some randomness)
401
+ estimated_minutes = max(5, question_count * 3 + random.randint(-2, 5))
402
+
403
+ # Ensure it's within reasonable bounds
404
+ estimated_minutes = min(180, max(5, estimated_minutes))
405
+
406
+ return f"{estimated_minutes} minutes"
backend/integrations/ai_integration/openai_generator.py CHANGED
@@ -36,4 +36,21 @@ class OpenAIGenerator(AIGeneratorInterface):
36
  """
37
  # In a real implementation, this would call the OpenAI API
38
  # For now, we'll raise an exception indicating it's not implemented
39
- raise NotImplementedError("OpenAI answer scoring not yet implemented")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  """
37
  # In a real implementation, this would call the OpenAI API
38
  # For now, we'll raise an exception indicating it's not implemented
39
+ raise NotImplementedError("OpenAI answer scoring not yet implemented")
40
+
41
def estimate_duration(self, prompt: str) -> str:
    """Estimate an assessment's duration via the OpenAI API (stub).

    Args:
        prompt: Detailed description of the assessment to be timed.

    Returns:
        Would return the raw AI reply containing the estimate.

    Raises:
        NotImplementedError: always — the OpenAI backend is not wired up yet.
    """
    # Placeholder until the real OpenAI API integration lands.
    raise NotImplementedError("OpenAI duration estimation not yet implemented")
backend/schemas/assessment.py CHANGED
@@ -25,14 +25,12 @@ class AssessmentBase(BaseSchema):
25
 
26
  class AssessmentCreate(BaseModel):
27
  title: str = Field(..., min_length=1, max_length=200)
28
- duration: Optional[int] = Field(None, ge=1) # Duration in seconds, if provided should be positive
29
  passing_score: int = Field(..., ge=20, le=80) # range 20-80
30
  questions_types: List[QuestionType] # array of enum(choose_one, choose_many, text_based)
31
  additional_note: Optional[str] = Field(None, max_length=500)
32
 
33
  class AssessmentUpdate(BaseModel):
34
  title: Optional[str] = Field(None, min_length=1, max_length=200)
35
- duration: Optional[int] = Field(None, ge=1) # Duration in seconds, if provided should be positive
36
  passing_score: Optional[int] = Field(None, ge=20, le=80) # range 20-80
37
  questions: Optional[List[AssessmentQuestion]] = None
38
  active: Optional[bool] = None
 
25
 
26
class AssessmentCreate(BaseModel):
    """Request payload for creating an assessment.

    NOTE(review): the explicit `duration` field was removed from this
    schema — duration is now estimated by the AI service at creation time.
    """
    # Non-empty display title, capped at 200 characters.
    title: str = Field(..., min_length=1, max_length=200)

    passing_score: int = Field(..., ge=20, le=80) # range 20-80
    questions_types: List[QuestionType] # array of enum(choose_one, choose_many, text_based)
    # Optional free-text note, capped at 500 characters.
    additional_note: Optional[str] = Field(None, max_length=500)
31
 
32
class AssessmentUpdate(BaseModel):
    """Request payload for partially updating an assessment.

    All fields are optional; `duration` is intentionally absent because
    the service layer recomputes it rather than accepting it from clients.
    """
    # New display title; validated only when provided.
    title: Optional[str] = Field(None, min_length=1, max_length=200)

    passing_score: Optional[int] = Field(None, ge=20, le=80) # range 20-80
    # Replacement question list; None leaves questions unchanged.
    questions: Optional[List[AssessmentQuestion]] = None
    # Toggle assessment visibility/activation.
    active: Optional[bool] = None
backend/services/__init__.py CHANGED
@@ -42,6 +42,10 @@ from .application_service import (
42
  calculate_application_score
43
  )
44
 
 
 
 
 
45
  __all__ = [
46
  "get_user",
47
  "get_user_by_email",
@@ -71,5 +75,6 @@ __all__ = [
71
  "create_application",
72
  "update_application",
73
  "delete_application",
74
- "calculate_application_score"
 
75
  ]
 
42
  calculate_application_score
43
  )
44
 
45
+ from .ai_service import (
46
+ estimate_assessment_duration
47
+ )
48
+
49
  __all__ = [
50
  "get_user",
51
  "get_user_by_email",
 
75
  "create_application",
76
  "update_application",
77
  "delete_application",
78
+ "calculate_application_score",
79
+ "estimate_assessment_duration"
80
  ]
backend/services/ai_service.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from typing import List, Dict, Any
2
  from schemas.assessment import AssessmentQuestion
3
  from schemas.application import ApplicationAnswerWithQuestion
@@ -76,4 +77,73 @@ def score_answer(question: AssessmentQuestion, answer_text: str, selected_option
76
  )
77
 
78
  logger.info(f"Scored answer with score: {score_result['score']}, correct: {score_result['correct']}")
79
- return score_result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
  from typing import List, Dict, Any
3
  from schemas.assessment import AssessmentQuestion
4
  from schemas.application import ApplicationAnswerWithQuestion
 
77
  )
78
 
79
  logger.info(f"Scored answer with score: {score_result['score']}, correct: {score_result['correct']}")
80
+ return score_result
81
+
82
def estimate_assessment_duration(
    title: str,
    job_info: dict,
    questions: List[AssessmentQuestion],
    additional_note: Optional[str] = None,
    provider: Optional[str] = None,
) -> int:
    """
    Estimate the duration needed for an assessment based on its details and questions.

    Args:
        title: The title of the assessment
        job_info: Information about the job the assessment is for (reads the
            keys: title, seniority, description, skill_categories)
        questions: List of questions in the assessment
        additional_note: Additional information about the assessment
        provider: The AI provider to use (defaults to DEFAULT_PROVIDER)

    Returns:
        Estimated duration in minutes, clamped to the 1-180 range; falls back
        to ~3 minutes per question (bounded 5-60) if the AI reply contains no number.
    """
    logger.info(f"Estimating duration for assessment: '{title}' with {len(questions)} questions")

    # Use the default provider if none is specified
    if provider is None:
        provider = DEFAULT_PROVIDER

    # Get the AI generator from the factory
    ai_generator = AIGeneratorFactory.create_generator(provider)

    # Prepare the prompt for the AI.
    # `or []` guards against a skill_categories key that is explicitly None,
    # which would previously crash str.join with a TypeError.
    prompt = f"""
    Based on the following assessment details, estimate how many minutes a candidate would need to complete this assessment.
    Consider the complexity of the questions and the job requirements.

    Assessment Title: {title}

    Job Information:
    - Title: {job_info.get('title', 'N/A')}
    - Seniority: {job_info.get('seniority', 'N/A')}
    - Description: {job_info.get('description', 'N/A')}
    - Skill Categories: {', '.join(job_info.get('skill_categories') or [])}

    Questions Count: {len(questions)}
    """

    # Add question details to the prompt
    for i, question in enumerate(questions[:5]):  # Limit to first 5 questions to avoid overly long prompts
        prompt += f"\nQuestion {i+1} ({question.type}): {question.text[:100]}..."
        if question.type == 'text_based':
            prompt += " (Text-based question requiring written response)"
        elif question.type in ['choose_one', 'choose_many']:
            # `or []` guards against choice questions whose options list is
            # missing/None — previously len(None) raised a TypeError.
            prompt += f" (Multiple choice with {len(question.options or [])} options)"

    if additional_note:
        prompt += f"\nAdditional Notes: {additional_note}"

    prompt += "\n\nPlease provide only a number representing the estimated duration in minutes."

    # Get the AI's estimation
    duration_estimate = ai_generator.estimate_duration(prompt)

    # Extract the first number from the response using regex
    duration_match = re.search(r'\d+', duration_estimate)
    if duration_match:
        duration_minutes = int(duration_match.group())
        # Ensure the duration is within reasonable bounds (1-180 minutes)
        duration_minutes = max(1, min(180, duration_minutes))
        logger.info(f"Estimated duration for assessment '{title}': {duration_minutes} minutes")
        return duration_minutes

    # No number in the AI response: fall back to a heuristic of 3 minutes
    # per question, bounded to the 5-60 minute range.
    default_duration = min(60, max(5, len(questions) * 3))
    logger.warning(f"No duration found in AI response. Using default: {default_duration} minutes")
    return default_duration
backend/services/assessment_service.py CHANGED
@@ -67,11 +67,20 @@ def create_assessment(db: Session, job_id: str, assessment: AssessmentCreate) ->
67
  # Convert the generated questions to JSON
68
  questions_json = json.dumps([q.model_dump() for q in generated_questions])
69
 
 
 
 
 
 
 
 
 
 
70
  db_assessment = Assessment(
71
  id=str(uuid.uuid4()),
72
  job_id=job_id,
73
  title=assessment.title,
74
- duration=getattr(assessment, 'duration', None), # Include duration if available
75
  passing_score=assessment.passing_score,
76
  questions=questions_json, # Store as JSON string
77
  active=True
@@ -92,12 +101,76 @@ def update_assessment(db: Session, assessment_id: str, **kwargs) -> Optional[Ass
92
  if isinstance(value, list):
93
  # Value is already a JSON string if coming from regenerate_assessment
94
  setattr(db_assessment, key, json.dumps([q.model_dump() if hasattr(q, 'model_dump') else q for q in value]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  elif isinstance(value, str):
96
  # Value is already a JSON string
97
  setattr(db_assessment, key, value)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  else:
99
  # Handle other cases
100
  setattr(db_assessment, key, json.dumps(value))
 
 
 
101
  else:
102
  setattr(db_assessment, key, value)
103
  db.commit()
@@ -149,6 +222,19 @@ def regenerate_assessment(db: Session, assessment_id: str, **kwargs) -> Optional
149
 
150
  # Update the kwargs to use the generated questions
151
  kwargs['questions'] = questions_json
 
 
 
 
 
 
 
 
 
 
 
 
 
152
  # Remove questions_types from kwargs as it's not a field in the Assessment model
153
  del kwargs['questions_types']
154
 
 
67
  # Convert the generated questions to JSON
68
  questions_json = json.dumps([q.model_dump() for q in generated_questions])
69
 
70
+ # Estimate the duration using AI
71
+ from services.ai_service import estimate_assessment_duration
72
+ duration = estimate_assessment_duration(
73
+ title=assessment.title,
74
+ job_info=job_info,
75
+ questions=generated_questions,
76
+ additional_note=assessment.additional_note
77
+ )
78
+
79
  db_assessment = Assessment(
80
  id=str(uuid.uuid4()),
81
  job_id=job_id,
82
  title=assessment.title,
83
+ duration=duration,
84
  passing_score=assessment.passing_score,
85
  questions=questions_json, # Store as JSON string
86
  active=True
 
101
  if isinstance(value, list):
102
  # Value is already a JSON string if coming from regenerate_assessment
103
  setattr(db_assessment, key, json.dumps([q.model_dump() if hasattr(q, 'model_dump') else q for q in value]))
104
+
105
+ # If questions are being updated, recalculate the duration using AI
106
+ from services.ai_service import estimate_assessment_duration
107
+ from models.job import Job
108
+ import json
109
+
110
+ # Get the job information
111
+ job = db.query(Job).filter(Job.id == db_assessment.job_id).first()
112
+ job_info = {}
113
+ if job:
114
+ job_info = {
115
+ "title": job.title,
116
+ "seniority": job.seniority,
117
+ "description": job.description,
118
+ "skill_categories": json.loads(job.skill_categories) if job.skill_categories else []
119
+ }
120
+
121
+ # Parse the questions to pass to the AI
122
+ questions = [q for q in value]
123
+
124
+ # Estimate new duration based on updated questions
125
+ duration = estimate_assessment_duration(
126
+ title=db_assessment.title,
127
+ job_info=job_info,
128
+ questions=questions,
129
+ additional_note=None # Use None or get from somewhere if available
130
+ )
131
+ setattr(db_assessment, 'duration', duration)
132
  elif isinstance(value, str):
133
  # Value is already a JSON string
134
  setattr(db_assessment, key, value)
135
+
136
+ # If questions are being updated as a JSON string, parse them to estimate duration
137
+ try:
138
+ parsed_questions = json.loads(value)
139
+ from schemas.assessment import AssessmentQuestion
140
+ from services.ai_service import estimate_assessment_duration
141
+ from models.job import Job
142
+
143
+ # Get the job information
144
+ job = db.query(Job).filter(Job.id == db_assessment.job_id).first()
145
+ job_info = {}
146
+ if job:
147
+ job_info = {
148
+ "title": job.title,
149
+ "seniority": job.seniority,
150
+ "description": job.description,
151
+ "skill_categories": json.loads(job.skill_categories) if job.skill_categories else []
152
+ }
153
+
154
+ # Convert parsed questions to AssessmentQuestion objects
155
+ questions = [AssessmentQuestion(**q) for q in parsed_questions]
156
+
157
+ # Estimate new duration based on updated questions
158
+ duration = estimate_assessment_duration(
159
+ title=db_assessment.title,
160
+ job_info=job_info,
161
+ questions=questions,
162
+ additional_note=None # Use None or get from somewhere if available
163
+ )
164
+ setattr(db_assessment, 'duration', duration)
165
+ except Exception as e:
166
+ logger.warning(f"Could not estimate duration from JSON questions: {str(e)}")
167
+ # If parsing fails, we'll skip duration recalculation
168
  else:
169
  # Handle other cases
170
  setattr(db_assessment, key, json.dumps(value))
171
+ elif key == 'duration':
172
+ # Skip setting duration since it's handled by AI
173
+ continue
174
  else:
175
  setattr(db_assessment, key, value)
176
  db.commit()
 
222
 
223
  # Update the kwargs to use the generated questions
224
  kwargs['questions'] = questions_json
225
+
226
+ # Estimate the duration using AI
227
+ from services.ai_service import estimate_assessment_duration
228
+ # Get the original assessment to access its title
229
+ original_assessment = get_assessment(db, assessment_id)
230
+ duration = estimate_assessment_duration(
231
+ title=original_assessment.title,
232
+ job_info=job_info,
233
+ questions=generated_questions,
234
+ additional_note=additional_note
235
+ )
236
+ kwargs['duration'] = duration
237
+
238
  # Remove questions_types from kwargs as it's not a field in the Assessment model
239
  del kwargs['questions_types']
240