import random
import re
import uuid
from typing import Any, Dict, List, Optional

from schemas.assessment import AssessmentQuestion, AssessmentQuestionOption
from schemas.enums import QuestionType
from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface


class MockAIGenerator(AIGeneratorInterface):
    """
    Mock AI Generator implementation for testing purposes.
    Generates questions based on predefined templates and job information.
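
    Illustrative usage sketch (assumes the schemas package is importable and
    that QuestionType defines the members referenced below):

        generator = MockAIGenerator()
        questions = generator.generate_questions(
            title="Python Developer Assessment",
            questions_types=[QuestionType.choose_one.value]
        )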
| """ |
| |
| def generate_questions( |
| self, |
| title: str, |
| questions_types: List[str], |
| additional_note: str = None, |
| job_info: Dict[str, Any] = None |
| ) -> List[AssessmentQuestion]: |
| """ |
| Generate questions using mock AI logic based on job information. |
| """ |
| |
| if job_info: |
| |
| seniority = job_info.get('seniority', '').lower() |
| skill_count = len(job_info.get('skill_categories', [])) |
| |
| |
| if seniority in ['senior', 'lead']: |
| base_questions = 15 |
| elif seniority in ['mid', 'intermediate']: |
| base_questions = 12 |
| else: |
| base_questions = 10 |
| |
| |
| adjusted_questions = base_questions + (skill_count // 2) |
| |
| |
| min_questions = len(questions_types) |
| total_questions = max(adjusted_questions, min_questions) |
| else: |
| |
| total_questions = max(10, len(questions_types)) |

        generated_questions = []

        for i in range(total_questions):
            # Cycle through the requested question types in round-robin order.
            q_type = questions_types[i % len(questions_types)]

            question_id = str(uuid.uuid4())

            question_text = self._generate_question_text(
                title, q_type, i + 1, additional_note, job_info
            )

            # Assign a random weight so questions vary in importance.
            weight = random.randint(1, 5)

            skill_categories = self._generate_skill_categories(title, job_info)

            # Only multiple-choice question types carry options.
            options = []
            correct_options = []
            if q_type in [QuestionType.choose_one.value, QuestionType.choose_many.value]:
                options = self._generate_multiple_choice_options(q_type, question_text)
                correct_options = self._select_correct_options(options, q_type)

            question = AssessmentQuestion(
                id=question_id,
                text=question_text,
                weight=weight,
                skill_categories=skill_categories,
                type=QuestionType(q_type),
                options=options,
                correct_options=correct_options
            )

            generated_questions.append(question)

        return generated_questions

    def _generate_question_text(
        self,
        title: str,
        q_type: str,
        question_number: int,
        additional_note: Optional[str] = None,
        job_info: Optional[Dict[str, Any]] = None
    ) -> str:
        """Generate question text based on the assessment title, job info and question type."""
        normalized_title = title.lower()

        job_title = job_info.get('title', '') if job_info else ''
        job_description = job_info.get('description', '') if job_info else ''
        job_skills = job_info.get('skill_categories', []) if job_info else []

        # Pick a template pool that matches the assessment or job domain.
        if ("python" in normalized_title or "programming" in normalized_title
                or "python" in job_title.lower() or "programming" in job_title.lower()):
            base_questions = [
                f"What is the correct way to declare a variable in {title}?",
                f"How would you implement a function to solve a problem in {title}?",
                f"Which of the following is a characteristic of {title}?",
                f"What is the time complexity of this operation in {title}?",
                f"In {title}, what is the purpose of this code snippet?",
                f"What is the output of this {title} code?",
                f"Which {title} concept is best suited for this scenario?",
                f"What is the main advantage of using {title} in this context?"
            ]
        elif ("software" in normalized_title or "engineer" in normalized_title
                or "software" in job_title.lower() or "engineer" in job_title.lower()):
            base_questions = [
                f"What is the most efficient approach to design a system for {title}?",
                f"Which software development principle applies to {title}?",
                f"How would you optimize the performance of a {title} application?",
                f"What is the best practice for error handling in {title}?",
                f"Which testing methodology is most appropriate for {title}?",
                f"What architectural pattern would you recommend for {title}?",
                f"How would you ensure scalability in {title}?",
                f"What security consideration is important for {title}?"
            ]
        elif ("data" in normalized_title or "analysis" in normalized_title
                or "data" in job_title.lower() or "analysis" in job_title.lower()):
            base_questions = [
                f"How would you clean and preprocess data for {title}?",
                f"Which statistical method is appropriate for {title}?",
                f"What visualization technique best represents {title}?",
                f"How would you handle missing values in {title}?",
                f"What is the correlation between variables in {title}?",
                f"Which machine learning model is suitable for {title}?",
                f"How would you validate the results of {title}?",
                f"What ethical consideration applies to {title}?"
            ]
        elif job_skills:
            # Fall back to templates built from the job's skill categories.
            skills_context = ", ".join(job_skills)
            base_questions = [
                f"How would you apply {skills_context} skills in this {title} role?",
                f"What challenges might you face using {skills_context} in this position?",
                f"Which {skills_context} techniques are most relevant for this {title}?",
                f"How would you leverage your {skills_context} experience in this role?",
                f"What {skills_context} methodologies would you use for this {title}?",
                f"How do {skills_context} skills contribute to success in this position?",
                f"What {skills_context} tools would be most effective for this {title}?",
                f"How would you apply {skills_context} best practices in this role?"
            ]
        else:
            # Generic templates when no domain can be inferred.
            base_questions = [
                f"What is the fundamental concept behind {title}?",
                f"How would you approach solving a problem in {title}?",
                f"What are the key characteristics of {title}?",
                f"What is the main purpose of {title}?",
                f"Which principle governs {title}?",
                f"How does {title} differ from similar concepts?",
                f"What are the advantages of using {title}?",
                f"What limitations should be considered in {title}?"
            ]

        # Deterministically vary the template: 7 is coprime with the pool
        # size (8), so consecutive question numbers hit different templates.
        question_index = (question_number * 7) % len(base_questions)
        question_text = base_questions[question_index]

        if additional_note:
            question_text += f" ({additional_note})"

        # Anchor the question to the job description, if one was provided.
        if job_description:
            question_text += f" Consider the following job description: {job_description[:100]}..."

        return question_text

    def _generate_skill_categories(self, title: str, job_info: Optional[Dict[str, Any]] = None) -> List[str]:
        """Generate skill categories based on the assessment title and job information."""
        categories = ["general"]

        job_title = job_info.get('title', '') if job_info else ''
        job_seniority = job_info.get('seniority', '').lower() if job_info else ''
        job_skills = job_info.get('skill_categories', []) if job_info else []

        # Match domain keywords against both the assessment and job titles.
        combined_title = f"{title} {job_title}".lower()

        if "python" in combined_title:
            categories.extend(["python", "programming", "backend"])
        elif "javascript" in combined_title or "js" in combined_title:
            categories.extend(["javascript", "programming", "frontend"])
        elif "react" in combined_title:
            categories.extend(["react", "javascript", "frontend"])
        elif "data" in combined_title or "analysis" in combined_title:
            categories.extend(["data-analysis", "statistics", "visualization"])
        elif "machine learning" in combined_title or "ml" in combined_title:
            categories.extend(["machine-learning", "algorithms", "data-science"])
        elif "devops" in combined_title:
            categories.extend(["devops", "ci/cd", "infrastructure"])
        elif "security" in combined_title:
            categories.extend(["security", "cybersecurity", "vulnerability"])
        elif "software" in combined_title or "engineer" in combined_title:
            categories.extend(["software-engineering", "design-patterns", "algorithms"])

        if job_skills:
            categories.extend(job_skills)

        # Add seniority-specific categories.
        if job_seniority == "intern":
            categories.extend(["learning", "basic-concepts", "mentoring"])
        elif job_seniority == "junior":
            categories.extend(["development", "coding", "implementation"])
        elif job_seniority == "mid":
            categories.extend(["problem-solving", "architecture", "teamwork"])
        elif job_seniority == "senior":
            categories.extend(["leadership", "architecture", "decision-making"])

        categories.extend(["problem-solving", "critical-thinking"])

        # Deduplicate while preserving insertion order (a plain set() would
        # reorder entries nondeterministically), then cap at five categories.
        return list(dict.fromkeys(categories))[:5]

    def _generate_multiple_choice_options(self, q_type: str, question_text: str) -> List[AssessmentQuestionOption]:
        """Generate multiple choice options for a question."""
        options = []

        # Vary the number of options between three and five.
        num_options = random.randint(3, 5)

        for i in range(num_options):
            option_letter = chr(ord('a') + i)

            # Pick an option-text pool that loosely matches the question domain.
            if "python" in question_text.lower():
                option_texts = [
                    f"Option {option_letter}: This approach uses Python's built-in functions",
                    f"Option {option_letter}: This solution involves a custom class implementation",
                    f"Option {option_letter}: This method leverages external libraries",
                    f"Option {option_letter}: This technique uses recursion",
                    f"Option {option_letter}: This algorithm has O(n) time complexity",
                    f"Option {option_letter}: This pattern follows Python best practices"
                ]
            elif "software" in question_text.lower() or "design" in question_text.lower():
                option_texts = [
                    f"Option {option_letter}: This follows the singleton pattern",
                    f"Option {option_letter}: This implements the observer pattern",
                    f"Option {option_letter}: This uses the factory method",
                    f"Option {option_letter}: This applies the decorator pattern",
                    f"Option {option_letter}: This utilizes microservices architecture",
                    f"Option {option_letter}: This employs event-driven design"
                ]
            else:
                option_texts = [
                    f"Option {option_letter}: This is the correct approach",
                    f"Option {option_letter}: This is an alternative method",
                    f"Option {option_letter}: This is a common misconception",
                    f"Option {option_letter}: This relates to advanced concepts",
                    f"Option {option_letter}: This is a basic implementation",
                    f"Option {option_letter}: This is an outdated approach"
                ]

            # 11 is coprime with the pool size (6), so each of the at most
            # five options receives a distinct template.
            option_index = (i * 11) % len(option_texts)
            option_text = option_texts[option_index]

            options.append(AssessmentQuestionOption(
                text=option_text,
                value=option_letter
            ))

        return options

    def _select_correct_options(self, options: List[AssessmentQuestionOption], q_type: str) -> List[str]:
        """Select the correct options for a question."""
        if not options:
            return []

        if q_type == QuestionType.choose_one.value:
            # Single-choice questions have exactly one correct answer.
            correct_index = random.randint(0, len(options) - 1)
            return [options[correct_index].value]

        elif q_type == QuestionType.choose_many.value:
            # Multiple-choice questions have one or two correct answers.
            num_correct = random.randint(1, min(2, len(options)))
            correct_indices = random.sample(range(len(options)), num_correct)
            return [options[i].value for i in correct_indices]

        # Other question types (e.g. text-based) have no fixed correct options.
        return []

    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """
        Score an answer based on the question and the provided answer.

        Args:
            question: The question being answered
            answer_text: The text of the answer (for text-based questions)
            selected_options: Selected options (for multiple choice questions)

        Returns:
            Dictionary containing score information:
            {
                'score': float,      # Score between 0 and 1
                'rationale': str,    # Explanation of the score
                'correct': bool      # Whether the answer is correct
            }
        """
        if question.type in [QuestionType.choose_one, QuestionType.choose_many]:
            if selected_options is None:
                selected_options = []

            # Multiple-choice answers must match the correct set exactly.
            correct = set(selected_options) == set(question.correct_options)

            if correct:
                score = 1.0
                rationale = f"The selected options {selected_options} match the correct options {question.correct_options}."
            else:
                score = 0.0
                rationale = f"The selected options {selected_options} do not match the correct options {question.correct_options}."

            return {
                'score': score,
                'rationale': rationale,
                'correct': correct
            }

        elif question.type == QuestionType.text_based:
            if answer_text and answer_text.strip():
                # Simulate AI evaluation with a simple heuristic.
                score = self._evaluate_text_answer(answer_text, question.text)
                rationale = f"The text answer was evaluated with score {score}."
            else:
                score = 0.0
                rationale = "No answer was provided."

            return {
                'score': score,
                'rationale': rationale,
                'correct': score > 0.5
            }

        # Unknown question type.
        return {
            'score': 0.0,
            'rationale': "Unable to score this type of question.",
            'correct': False
        }

    def _evaluate_text_answer(self, answer_text: str, question_text: str) -> float:
        """
        Evaluate a text-based answer (simulated AI evaluation).
        In a real implementation, this would call an AI service to evaluate
        the answer quality; here a simple heuristic awards at most 0.7.

        Args:
            answer_text: The text of the answer provided by the user
            question_text: The text of the question being answered

        Returns:
            Score between 0 and 1
        """
        score = 0.0

        # Reward answers of at least five words.
        if len(answer_text.split()) >= 5:
            score += 0.3

        # Reward answers that share at least one word with the question.
        question_keywords = set(question_text.lower().split())
        answer_words = set(answer_text.lower().split())
        common_words = question_keywords.intersection(answer_words)
        if len(common_words) > 0:
            score += 0.2

        # Reward longer, more detailed answers.
        if len(answer_text) > 100:
            score += 0.2

        # Clamp defensively, although the heuristic above cannot exceed 0.7.
        return min(score, 1.0)

    def estimate_duration(
        self,
        prompt: str
    ) -> str:
        """
        Estimate the duration for an assessment based on a prompt.

        Args:
            prompt: A detailed prompt describing the assessment

        Returns:
            String response from the AI containing the estimated duration
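
        The heuristic scans the prompt for these markers (format assumed for
        illustration; they are the substrings parsed below):

            Question 1: ...
            (Text-based question requiring written response)
            - Seniority: senior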
| """ |
| |
| |
| import random |
| import re |
|
|
| |
| |
| question_count = prompt.count("Question ") |
| |
| |
| seniority_match = re.search(r'- Seniority: (\w+)', prompt) |
| seniority = seniority_match.group(1).lower() if seniority_match else 'junior' |
| |
| |
| text_questions = prompt.count("(Text-based question requiring written response)") |
| |
| |
| base_duration = question_count * 2 |
| |
| |
| if seniority in ['senior', 'lead']: |
| base_duration = int(base_duration * 1.5) |
| elif seniority in ['mid', 'intermediate']: |
| base_duration = int(base_duration * 1.2) |
| |
| |
| |
| if text_questions > 0: |
| base_duration += text_questions * 2 |
| |
| |
| estimated_minutes = max(5, base_duration + random.randint(-1, 3)) |
|
|
| |
| estimated_minutes = min(180, max(5, estimated_minutes)) |
|
|
| return f"{estimated_minutes} minutes" |