Kaadan committed on
Commit
d359443
·
1 Parent(s): 2068bb0

Fix errors and add Mistral AI support

Browse files
backend/.env.example CHANGED
@@ -21,7 +21,10 @@ APP_NAME=AI-Powered Hiring Assessment Platform
21
  APP_VERSION=0.1.0
22
  APP_DESCRIPTION=MVP for managing hiring assessments using AI
23
 
 
 
24
  # AI Provider Configuration (for future use)
 
25
  OPENAI_API_KEY=
26
  ANTHROPIC_API_KEY=
27
  GOOGLE_AI_API_KEY=
 
21
  APP_VERSION=0.1.0
22
  APP_DESCRIPTION=MVP for managing hiring assessments using AI
23
 
24
+
25
+
26
  # AI Provider Configuration (for future use)
27
+ MISTRAL_API_KEY=your_mistral_api_key
28
  OPENAI_API_KEY=
29
  ANTHROPIC_API_KEY=
30
  GOOGLE_AI_API_KEY=
backend/api/application_routes.py CHANGED
@@ -16,7 +16,7 @@ logger = get_logger(__name__)
16
 
17
  router = APIRouter(prefix="/applications", tags=["applications"])
18
 
19
- @router.get("/jobs/{jid}/assessments/{aid}", response_model=ApplicationDetailedListResponse)
20
  def get_applications_list(jid: str, aid: str, page: int = 1, limit: int = 10, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
21
  """Get list of applications for an assessment"""
22
  logger.info(f"Retrieving applications list for job ID: {jid}, assessment ID: {aid}, page: {page}, limit: {limit} by user: {current_user.id}")
@@ -33,28 +33,170 @@ def get_applications_list(jid: str, aid: str, page: int = 1, limit: int = 10, db
33
  # Calculate total count
34
  total = len(get_applications_by_job_and_assessment(db, jid, aid, skip=0, limit=1000)) # Simplified for demo
35
 
36
- # Convert answers from JSON string to list and calculate scores
 
 
 
 
 
 
 
 
 
37
  application_responses = []
38
  for application in applications:
39
- application_dict = application.__dict__
40
- if application.answers:
41
- application_dict['answers'] = json.loads(application.answers)
42
- else:
43
- application_dict['answers'] = []
44
 
45
- # Calculate score (placeholder)
46
- application_dict['score'] = calculate_application_score(db, application.id)
47
- application_dict['passing_score'] = 0.0 # Placeholder
48
 
49
- application_responses.append(ApplicationResponse(**application_dict))
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  logger.info(f"Successfully retrieved {len(applications)} applications out of total {total} for job ID: {jid}, assessment ID: {aid}")
52
- return ApplicationDetailedListResponse(
53
- count=len(applications),
54
- total=total,
55
- data=application_responses
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  )
57
 
 
 
 
 
58
  @router.post("/jobs/{jid}/assessments/{aid}", response_model=dict) # Returns just id as per requirements
59
  def create_new_application(jid: str, aid: str, application: ApplicationCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
60
  """Create a new application for an assessment"""
 
16
 
17
  router = APIRouter(prefix="/applications", tags=["applications"])
18
 
19
+ @router.get("/jobs/{jid}/assessments/{aid}")
20
  def get_applications_list(jid: str, aid: str, page: int = 1, limit: int = 10, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
21
  """Get list of applications for an assessment"""
22
  logger.info(f"Retrieving applications list for job ID: {jid}, assessment ID: {aid}, page: {page}, limit: {limit} by user: {current_user.id}")
 
33
  # Calculate total count
34
  total = len(get_applications_by_job_and_assessment(db, jid, aid, skip=0, limit=1000)) # Simplified for demo
35
 
36
+ # Get the assessment to retrieve the passing score
37
+ assessment = get_assessment(db, aid)
38
+ if not assessment:
39
+ logger.error(f"Assessment not found for ID: {aid}")
40
+ raise HTTPException(
41
+ status_code=status.HTTP_404_NOT_FOUND,
42
+ detail="Assessment not found"
43
+ )
44
+
45
+ # Calculate scores and create responses
46
  application_responses = []
47
  for application in applications:
48
+ # Calculate score
49
+ score = calculate_application_score(db, application.id)
 
 
 
50
 
51
+ # Get user information
52
+ from services.user_service import get_user
53
+ user = get_user(db, application.user_id)
54
 
55
+ # Create response object that matches technical requirements exactly
56
+ application_response = {
57
+ 'id': application.id,
58
+ 'score': score,
59
+ 'passing_score': assessment.passing_score,
60
+ 'user': {
61
+ 'id': user.id if user else None,
62
+ 'first_name': user.first_name if user else None,
63
+ 'last_name': user.last_name if user else None,
64
+ 'email': user.email if user else None
65
+ } if user else None
66
+ }
67
+
68
+ application_responses.append(application_response)
69
 
70
  logger.info(f"Successfully retrieved {len(applications)} applications out of total {total} for job ID: {jid}, assessment ID: {aid}")
71
+ return {
72
+ 'count': len(applications),
73
+ 'total': total,
74
+ 'data': application_responses
75
+ }
76
+
77
+ @router.get("/jobs/{jid}/assessment_id/{aid}/applications/{id}", response_model=ApplicationDetailedResponse)
78
+ def get_application_detail(jid: str, aid: str, id: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
79
+ """Get detailed application information including answers"""
80
+ logger.info(f"Retrieving application detail for job ID: {jid}, assessment ID: {aid}, application ID: {id} by user: {current_user.id}")
81
+ # Only HR users can view application details
82
+ if current_user.role != "hr":
83
+ logger.warning(f"Unauthorized attempt to view application detail by user: {current_user.id} with role: {current_user.role}")
84
+ raise HTTPException(
85
+ status_code=status.HTTP_403_FORBIDDEN,
86
+ detail="Only HR users can view application details"
87
+ )
88
+
89
+ # Get the application
90
+ application = get_application(db, id)
91
+ if not application or application.job_id != jid or application.assessment_id != aid:
92
+ logger.warning(f"Application not found for job ID: {jid}, assessment ID: {aid}, application ID: {id}")
93
+ raise HTTPException(
94
+ status_code=status.HTTP_404_NOT_FOUND,
95
+ detail="Application not found for this job and assessment"
96
+ )
97
+
98
+ # Get the assessment to retrieve the passing score
99
+ assessment = get_assessment(db, aid)
100
+ if not assessment:
101
+ logger.error(f"Assessment not found for ID: {aid}")
102
+ raise HTTPException(
103
+ status_code=status.HTTP_404_NOT_FOUND,
104
+ detail="Assessment not found"
105
+ )
106
+
107
+ # Calculate score
108
+ score = calculate_application_score(db, application.id)
109
+
110
+ # Get user information
111
+ from services.user_service import get_user
112
+ user = get_user(db, application.user_id)
113
+
114
+ # Parse answers from JSON string
115
+ import json
116
+ answers = json.loads(application.answers) if application.answers else []
117
+
118
+ # Get the assessment questions to enrich the answers with question details
119
+ assessment_questions = json.loads(assessment.questions) if assessment.questions else []
120
+ question_map = {q['id']: q for q in assessment_questions}
121
+
122
+ # Enrich answers with question details and rationales
123
+ enriched_answers = []
124
+ for answer in answers:
125
+ question_id = answer.get('question_id')
126
+ question_data = question_map.get(question_id, {})
127
+
128
+ # For text-based questions, we might want to add rationale from AI scoring
129
+ rationale = 'No rationale available'
130
+ if question_data.get('type') == 'text_based':
131
+ # Use AI service to get rationale for text-based answers
132
+ from schemas.assessment import AssessmentQuestion, AssessmentQuestionOption
133
+ from schemas.enums import QuestionType
134
+
135
+ # Create a temporary question object for AI scoring
136
+ temp_question = AssessmentQuestion(
137
+ id=question_data['id'],
138
+ text=question_data['text'],
139
+ weight=question_data['weight'],
140
+ skill_categories=question_data['skill_categories'],
141
+ type=QuestionType(question_data['type']),
142
+ options=[AssessmentQuestionOption(text=opt['text'], value=opt['value']) for opt in question_data.get('options', [])],
143
+ correct_options=question_data.get('correct_options', [])
144
+ )
145
+
146
+ from services.ai_service import score_answer
147
+ try:
148
+ score_result = score_answer(
149
+ question=temp_question,
150
+ answer_text=answer.get('text', ''),
151
+ selected_options=answer.get('options', [])
152
+ )
153
+ rationale = score_result.get('rationale', 'No rationale provided') or 'No rationale provided'
154
+ except Exception:
155
+ rationale = 'Unable to generate rationale'
156
+
157
+ # Create an ApplicationAnswerWithQuestion object with proper field assignments
158
+ # The 'options' field in the parent class refers to selected options (List[str])
159
+ # The 'question_options' field in the child class refers to question options (List[dict])
160
+ from schemas.application import ApplicationAnswerWithQuestion
161
+ from schemas.enums import QuestionType
162
+ enriched_answer = ApplicationAnswerWithQuestion(
163
+ question_id=answer.get('question_id'),
164
+ text=answer.get('text'),
165
+ options=answer.get('options', []), # Selected options from the applicant (List[str])
166
+ question_text=question_data.get('text', ''),
167
+ weight=question_data.get('weight', 1),
168
+ skill_categories=question_data.get('skill_categories', []),
169
+ type=QuestionType(question_data.get('type', 'text_based')), # Convert to enum
170
+ question_options=question_data.get('options', []), # Question's possible options (List[dict])
171
+ correct_options=question_data.get('correct_options', []),
172
+ rationale=rationale
173
+ )
174
+
175
+ # Add the selected options as an additional attribute if needed
176
+ # But for now, we'll rely on the schema as defined
177
+ enriched_answers.append(enriched_answer)
178
+
179
+ # Create the detailed response
180
+ application_detail = ApplicationDetailedResponse(
181
+ id=application.id,
182
+ job_id=application.job_id,
183
+ assessment_id=application.assessment_id,
184
+ user_id=application.user_id,
185
+ answers=enriched_answers,
186
+ score=score,
187
+ passing_score=assessment.passing_score,
188
+ user={
189
+ 'id': user.id if user else None,
190
+ 'first_name': user.first_name if user else None,
191
+ 'last_name': user.last_name if user else None,
192
+ 'email': user.email if user else None
193
+ } if user else None
194
  )
195
 
196
+ logger.info(f"Successfully retrieved application detail for job ID: {jid}, assessment ID: {aid}, application ID: {id}")
197
+ return application_detail
198
+
199
+
200
  @router.post("/jobs/{jid}/assessments/{aid}", response_model=dict) # Returns just id as per requirements
201
  def create_new_application(jid: str, aid: str, application: ApplicationCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
202
  """Create a new application for an assessment"""
backend/api/assessment_routes.py CHANGED
@@ -85,7 +85,7 @@ def create_new_assessment(id: str, assessment: AssessmentCreate, db: Session = D
85
  return {"id": db_assessment.id}
86
 
87
  @router.patch("/jobs/{jid}/{aid}/regenerate")
88
- def regenerate_assessment(jid: str, aid: str, regenerate_data: AssessmentRegenerate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
89
  """Regenerate an assessment"""
90
  logger.info(f"Regenerating assessment for job ID: {jid}, assessment ID: {aid} by user: {current_user.id}")
91
  # Only HR users can regenerate assessments
@@ -95,7 +95,11 @@ def regenerate_assessment(jid: str, aid: str, regenerate_data: AssessmentRegener
95
  status_code=status.HTTP_403_FORBIDDEN,
96
  detail="Only HR users can regenerate assessments"
97
  )
98
- updated_assessment = regenerate_assessment(db, aid, **regenerate_data.model_dump(exclude_unset=True))
 
 
 
 
99
  if not updated_assessment:
100
  logger.warning(f"Assessment not found for regeneration with job ID: {jid}, assessment ID: {aid}")
101
  raise HTTPException(
 
85
  return {"id": db_assessment.id}
86
 
87
  @router.patch("/jobs/{jid}/{aid}/regenerate")
88
+ def regenerate_assessment_route(jid: str, aid: str, regenerate_data: AssessmentRegenerate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
89
  """Regenerate an assessment"""
90
  logger.info(f"Regenerating assessment for job ID: {jid}, assessment ID: {aid} by user: {current_user.id}")
91
  # Only HR users can regenerate assessments
 
95
  status_code=status.HTTP_403_FORBIDDEN,
96
  detail="Only HR users can regenerate assessments"
97
  )
98
+ # Extract parameters from the request data using dict() to maintain consistency with other routes
99
+ regenerate_params = regenerate_data.dict(exclude_unset=True)
100
+
101
+ # Call the service function with the extracted parameters
102
+ updated_assessment = regenerate_assessment(db, aid, **regenerate_params)
103
  if not updated_assessment:
104
  logger.warning(f"Assessment not found for regeneration with job ID: {jid}, assessment ID: {aid}")
105
  raise HTTPException(
backend/config.py CHANGED
@@ -25,6 +25,9 @@ class Settings(BaseSettings):
25
  algorithm: str = "HS256"
26
  access_token_expire_minutes: int = 30
27
 
 
 
 
28
  # Application Configuration
29
  app_name: str = "AI-Powered Hiring Assessment Platform"
30
  app_version: str = "0.1.0"
 
25
  algorithm: str = "HS256"
26
  access_token_expire_minutes: int = 30
27
 
28
+ # AI Provider Configuration
29
+ mistral_api_key: Optional[str] = None
30
+
31
  # Application Configuration
32
  app_name: str = "AI-Powered Hiring Assessment Platform"
33
  app_version: str = "0.1.0"
backend/integrations/ai_integration/ai_factory.py CHANGED
@@ -5,6 +5,7 @@ from integrations.ai_integration.mock_ai_generator import MockAIGenerator
5
  from integrations.ai_integration.openai_generator import OpenAIGenerator
6
  from integrations.ai_integration.anthropic_generator import AnthropicGenerator
7
  from integrations.ai_integration.google_ai_generator import GoogleAIGenerator
 
8
 
9
 
10
  class AIProvider(Enum):
@@ -15,6 +16,7 @@ class AIProvider(Enum):
15
  OPENAI = "openai"
16
  ANTHROPIC = "anthropic"
17
  GOOGLE = "google"
 
18
 
19
 
20
  class AIGeneratorFactory:
@@ -72,7 +74,8 @@ AIGeneratorFactory.register_provider(AIProvider.MOCK, MockAIGenerator)
72
  AIGeneratorFactory.register_provider(AIProvider.OPENAI, OpenAIGenerator)
73
  AIGeneratorFactory.register_provider(AIProvider.ANTHROPIC, AnthropicGenerator)
74
  AIGeneratorFactory.register_provider(AIProvider.GOOGLE, GoogleAIGenerator)
 
75
 
76
 
77
  # Optional: Create a default provider
78
- DEFAULT_PROVIDER = AIProvider.MOCK
 
5
  from integrations.ai_integration.openai_generator import OpenAIGenerator
6
  from integrations.ai_integration.anthropic_generator import AnthropicGenerator
7
  from integrations.ai_integration.google_ai_generator import GoogleAIGenerator
8
+ from integrations.ai_integration.mistral_generator import MistralGenerator
9
 
10
 
11
  class AIProvider(Enum):
 
16
  OPENAI = "openai"
17
  ANTHROPIC = "anthropic"
18
  GOOGLE = "google"
19
+ MISTRAL = "mistral"
20
 
21
 
22
  class AIGeneratorFactory:
 
74
  AIGeneratorFactory.register_provider(AIProvider.OPENAI, OpenAIGenerator)
75
  AIGeneratorFactory.register_provider(AIProvider.ANTHROPIC, AnthropicGenerator)
76
  AIGeneratorFactory.register_provider(AIProvider.GOOGLE, GoogleAIGenerator)
77
+ AIGeneratorFactory.register_provider(AIProvider.MISTRAL, MistralGenerator)
78
 
79
 
80
  # Optional: Create a default provider
81
+ DEFAULT_PROVIDER = AIProvider.MISTRAL
backend/integrations/ai_integration/mistral_generator.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from typing import List, Dict, Any
4
+ from mistralai import Mistral
5
+ from schemas.assessment import AssessmentQuestion, AssessmentQuestionOption
6
+ from schemas.enums import QuestionType
7
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
8
+ from config import settings
9
+
10
+
11
+ class MistralGenerator(AIGeneratorInterface):
12
+ """
13
+ Mistral Generator implementation for generating assessment questions using Mistral AI API.
14
+ """
15
+
16
+ def __init__(self):
17
+ """
18
+ Initialize the MistralGenerator with API key from settings.
19
+ """
20
+ api_key = os.getenv("MISTRAL_API_KEY") or getattr(settings, 'mistral_api_key', None)
21
+
22
+ if not api_key:
23
+ raise ValueError("MISTRAL_API_KEY environment variable is not set")
24
+
25
+ self.client = Mistral(api_key=api_key)
26
+
27
+ def generate_questions(
28
+ self,
29
+ title: str,
30
+ questions_types: List[str],
31
+ additional_note: str = None,
32
+ job_info: Dict[str, Any] = None
33
+ ) -> List[AssessmentQuestion]:
34
+ """
35
+ Generate questions using Mistral AI API based on the assessment title, job information, and specified question types.
36
+ """
37
+ # Prepare the prompt for Mistral AI
38
+ prompt = self._create_prompt(title, questions_types, additional_note, job_info)
39
+
40
+ messages = [
41
+ {"role": "system", "content": "You generate technical assessment questions."},
42
+ {"role": "user", "content": prompt},
43
+ ]
44
+
45
+ response = self.client.chat.complete(
46
+ model="mistral-small-latest",
47
+ messages=messages,
48
+ temperature=0.2,
49
+ )
50
+
51
+ content = response.choices[0].message.content
52
+ passed = 0
53
+ while passed < 5:
54
+ try:
55
+ try:
56
+ # Parse the JSON response from Mistral
57
+ questions_data = json.loads(content)
58
+ passed = 10
59
+ except json.JSONDecodeError:
60
+ content = content[7:-3].strip()
61
+ questions_data = json.loads(content)
62
+ passed = 10
63
+ except json.JSONDecodeError:
64
+ raise ValueError("Mistral returned invalid JSON")
65
+
66
+ # Convert the response to AssessmentQuestion objects
67
+ return self._convert_to_assessment_questions(questions_data)
68
+
69
+ def score_answer(
70
+ self,
71
+ question: AssessmentQuestion,
72
+ answer_text: str,
73
+ selected_options: List[str] = None
74
+ ) -> Dict[str, Any]:
75
+ """
76
+ Score an answer using Mistral AI API based on the question and the provided answer.
77
+ """
78
+ # Create a prompt for scoring the answer
79
+ if question.type == QuestionType.text_based:
80
+ prompt = f"""
81
+ Evaluate the following answer to a text-based question:
82
+
83
+ Question: {question.text}
84
+ Answer: {answer_text}
85
+
86
+ Please provide a score between 0 and 1, where 1 means completely correct and 0 means completely incorrect.
87
+ Also provide a brief rationale for the score.
88
+
89
+ Respond in the following JSON format:
90
+ {{
91
+ "score": float,
92
+ "rationale": str,
93
+ "correct": bool
94
+ }}
95
+ """
96
+ else:
97
+ # For multiple choice questions
98
+ selected_str = ", ".join(selected_options) if selected_options else "No options selected"
99
+ correct_str = ", ".join(question.correct_options) if question.correct_options else "Unknown"
100
+
101
+ prompt = f"""
102
+ Evaluate the following answer to a multiple-choice question:
103
+
104
+ Question: {question.text}
105
+ Selected Options: {selected_str}
106
+ Correct Options: {correct_str}
107
+
108
+ Please provide a score between 0 and 1, where 1 means completely correct and 0 means completely incorrect.
109
+ Also provide a brief rationale for the score.
110
+
111
+ Respond in the following JSON format:
112
+ {{
113
+ "score": float,
114
+ "rationale": str,
115
+ "correct": bool
116
+ }}
117
+ """
118
+
119
+ messages = [
120
+ {"role": "system", "content": "You are an expert at evaluating assessment answers."},
121
+ {"role": "user", "content": prompt},
122
+ ]
123
+
124
+ response = self.client.chat.complete(
125
+ model="mistral-small-latest",
126
+ messages=messages,
127
+ temperature=0.2,
128
+ )
129
+
130
+ content = response.choices[0].message.content
131
+ passed = 0
132
+ while passed < 5:
133
+ try:
134
+ try:
135
+ result = json.loads(content)
136
+ passed = 10 # Exit the loop successfully
137
+ except json.JSONDecodeError:
138
+ # Try to strip markdown code block markers
139
+ content = content[7:-3].strip()
140
+ result = json.loads(content)
141
+ passed = 10 # Exit the loop successfully
142
+ except json.JSONDecodeError:
143
+ raise ValueError("Mistral returned invalid JSON for answer scoring")
144
+
145
+ return {
146
+ 'score': result.get('score', 0.0),
147
+ 'rationale': result.get('rationale', ''),
148
+ 'correct': result.get('correct', False)
149
+ }
150
+
151
+ def _create_prompt(
152
+ self,
153
+ title: str,
154
+ questions_types: List[str],
155
+ additional_note: str = None,
156
+ job_info: Dict[str, Any] = None
157
+ ) -> str:
158
+ """
159
+ Create a prompt for Mistral AI based on the assessment requirements.
160
+ """
161
+ # Map question types to the expected format for Mistral
162
+ type_mapping = {
163
+ QuestionType.choose_one.value: "MCQ",
164
+ QuestionType.choose_many.value: "MCQ", # Multiple choice with multiple correct answers
165
+ QuestionType.text_based.value: "TEXT"
166
+ }
167
+
168
+ # Count the number of each type of question needed
169
+ mcq_count = questions_types.count(QuestionType.choose_one.value) + \
170
+ questions_types.count(QuestionType.choose_many.value)
171
+ text_count = questions_types.count(QuestionType.text_based.value)
172
+
173
+ # Build the job information section of the prompt
174
+ job_details = ""
175
+ if job_info:
176
+ job_title = job_info.get('title', '')
177
+ job_skills = job_info.get('skill_categories', [])
178
+ job_seniority = job_info.get('seniority', '')
179
+
180
+ job_details = f"""
181
+ Job Information:
182
+ - Title: {job_title}
183
+ - Skills: {', '.join(job_skills)}
184
+ - Seniority: {job_seniority}
185
+ """
186
+ else:
187
+ job_details = f"""
188
+ Job Information:
189
+ - Title: {title}
190
+ """
191
+
192
+ # Add additional note if provided
193
+ if additional_note:
194
+ job_details += f"- Additional Note: {additional_note}\n"
195
+
196
+ prompt = f"""
197
+ You are an assessment generator.
198
+
199
+ Generate EXACTLY {len(questions_types)} questions for the following job.
200
+
201
+ {job_details}
202
+
203
+ MANDATORY RULES:
204
+ 1. Output MUST be a JSON ARRAY with EXACTLY {len(questions_types)} objects.
205
+ 2. The list MUST contain:
206
+ - {mcq_count} MCQ questions (multiple choice)
207
+ - {text_count} TEXT questions (text-based)
208
+ 3. Do NOT include explanations or markdown.
209
+ 4. Follow the schema EXACTLY.
210
+
211
+ Schema for each question:
212
+
213
+ {{
214
+ "type": "MCQ | TEXT",
215
+ "prompt": "string",
216
+ "choices": ["string"], // For MCQ questions only
217
+ "correct_answer": "string | null", // For MCQ questions, string for correct choice; for TEXT questions, null
218
+ "difficulty": "easy | medium | hard",
219
+ "skill": "string"
220
+ }}
221
+
222
+ Rules per type:
223
+ - MCQ → 4 choices + correct_answer as the text of the correct choice
224
+ - TEXT → correct_answer = null
225
+
226
+ Return ONLY the JSON array.
227
+ """
228
+
229
+ return prompt
230
+
231
+ def _convert_to_assessment_questions(self, questions_data: List[Dict]) -> List[AssessmentQuestion]:
232
+ """
233
+ Convert the JSON response from Mistral to AssessmentQuestion objects.
234
+ """
235
+ assessment_questions = []
236
+
237
+ for i, q_data in enumerate(questions_data):
238
+ # Generate a unique ID for the question
239
+ question_id = f"mistral_{i}"
240
+
241
+ # Determine the question type based on the response
242
+ if q_data.get("type") == "MCQ":
243
+ # For multiple choice questions
244
+ question_type = QuestionType.choose_one # Default to choose_one
245
+
246
+ # Create options
247
+ options = []
248
+ for choice in q_data.get("choices", []):
249
+ option = AssessmentQuestionOption(text=choice, value=choice)
250
+ options.append(option)
251
+
252
+ # Find the correct option
253
+ correct_options = []
254
+ correct_answer = q_data.get("correct_answer")
255
+ if correct_answer:
256
+ # Find the option that matches the correct answer
257
+ for opt in options:
258
+ if opt.text == correct_answer:
259
+ correct_options.append(opt.value)
260
+ break
261
+ else:
262
+ # For text-based questions
263
+ question_type = QuestionType.text_based
264
+ options = []
265
+ correct_options = []
266
+
267
+ # Create the AssessmentQuestion object
268
+ question = AssessmentQuestion(
269
+ id=question_id,
270
+ text=q_data.get("prompt", ""),
271
+ weight=3, # Default weight
272
+ skill_categories=[q_data.get("skill", "General")], # Default to General if no skill specified
273
+ type=question_type,
274
+ options=options,
275
+ correct_options=correct_options
276
+ )
277
+
278
+ assessment_questions.append(question)
279
+
280
+ return assessment_questions
backend/requirements.txt CHANGED
Binary files a/backend/requirements.txt and b/backend/requirements.txt differ
 
backend/schemas/application.py CHANGED
@@ -28,7 +28,7 @@ class ApplicationAnswerWithQuestion(ApplicationAnswer):
28
  weight: int = Field(..., ge=1, le=5) # range 1-5
29
  skill_categories: List[str] = Field(..., min_items=1)
30
  type: QuestionType
31
- options: Optional[List[dict]] = []
32
  correct_options: Optional[List[str]] = []
33
  rationale: str = Field(..., min_length=1, max_length=1000)
34
 
 
28
  weight: int = Field(..., ge=1, le=5) # range 1-5
29
  skill_categories: List[str] = Field(..., min_items=1)
30
  type: QuestionType
31
+ question_options: Optional[List[dict]] = [] # Options for the question
32
  correct_options: Optional[List[str]] = []
33
  rationale: str = Field(..., min_length=1, max_length=1000)
34
 
backend/services/application_service.py CHANGED
@@ -102,7 +102,14 @@ def calculate_application_score(db: Session, application_id: str) -> float:
102
  # Parse the answers and questions
103
  import json
104
  try:
105
- answers = json.loads(application.answers) if application.answers else []
 
 
 
 
 
 
 
106
  questions = json.loads(assessment.questions) if assessment.questions else []
107
  except json.JSONDecodeError:
108
  logger.error(f"Failed to parse answers or questions for application ID: {application_id}")
 
102
  # Parse the answers and questions
103
  import json
104
  try:
105
+ # Check if answers is already a list (parsed) or a string (needs parsing)
106
+ if isinstance(application.answers, str):
107
+ answers = json.loads(application.answers) if application.answers else []
108
+ else:
109
+ # Assume it's already a list object
110
+ answers = application.answers if application.answers else []
111
+
112
+ # Questions should always be a JSON string from the database
113
  questions = json.loads(assessment.questions) if assessment.questions else []
114
  except json.JSONDecodeError:
115
  logger.error(f"Failed to parse answers or questions for application ID: {application_id}")