"""AICELS: AI-Powered Code Evaluation and Learning System.

Evaluates pasted Python code in two phases — correctness first (does it
parse and run?), quality second (naming, docs, structure, PEP 8) — and
renders a personalized learning path through a Gradio interface.
"""

import ast
import os
import subprocess
import tempfile
import traceback
from contextlib import redirect_stderr, redirect_stdout
from datetime import datetime
from io import StringIO

import gradio as gr


# --- CodeEvaluator Class ---
class CodeEvaluator:
    """
    Evaluates Python code for correctness and quality.
    """

    def __init__(self):
        self.code = ""                   # last code string evaluated
        self.correctness_passed = False  # set True once exec() succeeds
        self.execution_error = None      # "ErrorType: message" on failure
        self.quality_issues = []         # dicts: {'principle', 'issue', ...}
        self.quality_strengths = []      # human-readable positives
        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    def evaluate_correctness(self, code):
        """Parse and execute *code*; return a result dict.

        Returns ``{'passed': True, 'error': None}`` on success, otherwise
        ``{'passed': False, 'error_type': ..., 'error': ...}`` (runtime
        failures also carry a ``'traceback'`` key).
        """
        self.code = code

        # Phase 1a: pure syntax check — nothing is executed yet.
        try:
            ast.parse(code)
        except SyntaxError as e:
            self.execution_error = f"Syntax Error: {str(e)}"
            return {'passed': False, 'error_type': 'SyntaxError',
                    'error': self.execution_error}

        # Phase 1b: execute with stdout/stderr captured. The context
        # managers guarantee the real streams are restored even when exec()
        # raises something outside Exception (e.g. SystemExit from user code
        # calling sys.exit()) — the previous manual save/restore leaked the
        # redirect in that case.
        try:
            with redirect_stdout(StringIO()), redirect_stderr(StringIO()):
                # SECURITY: exec() runs user-supplied code in-process with
                # full interpreter privileges. Acceptable for a local
                # learning tool; must be sandboxed before public deployment.
                exec(code, {})
        except Exception as e:
            error_type = type(e).__name__
            self.execution_error = f"{error_type}: {str(e)}"
            return {'passed': False, 'error_type': error_type,
                    'error': self.execution_error,
                    'traceback': traceback.format_exc()}

        self.correctness_passed = True
        return {'passed': True, 'error': None}

    def evaluate_quality(self):
        """Run all quality checks; requires a prior passing correctness run."""
        if not self.correctness_passed:
            return {'evaluated': False,
                    'message': 'Quality evaluation skipped - code must run first'}

        self.quality_issues = []
        self.quality_strengths = []

        pep8_issues = self._check_pep8()

        try:
            tree = ast.parse(self.code)
            self._check_naming_conventions(tree)
            self._check_documentation(tree)
            self._check_function_structure(tree)
            self._check_code_smells(tree)
        except Exception as e:
            # Analysis failures become a reported issue rather than a crash.
            self.quality_issues.append({
                'principle': 'Analysis Error',
                'issue': f"Could not fully analyze code: {str(e)}"})

        grade = self._calculate_grade()
        return {'evaluated': True, 'grade': grade,
                'issues': self.quality_issues,
                'strengths': self.quality_strengths,
                'pep8_issues': pep8_issues}

    def _check_pep8(self):
        """Run pycodestyle over the code; return its issue lines."""
        with tempfile.NamedTemporaryFile(mode='w', suffix='.py',
                                         delete=False) as f:
            f.write(self.code)
            temp_file = f.name
        try:
            try:
                result = subprocess.run(['pycodestyle', temp_file],
                                        capture_output=True, text=True)
            except (FileNotFoundError, OSError):
                # pycodestyle is not installed — skip the style check
                # instead of crashing the whole evaluation (previously an
                # unhandled FileNotFoundError aborted evaluate_quality()).
                return []

            issues = []
            if result.stdout:
                for line in result.stdout.strip().split('\n'):
                    if line:
                        issues.append(line)
            if not issues:
                self.quality_strengths.append(
                    "✓ Follows PEP 8 style guidelines")
            return issues
        finally:
            os.unlink(temp_file)

    def _check_naming_conventions(self, tree):
        """Flag non-snake_case functions and non-PascalCase classes."""
        issues = []
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                if not self._is_snake_case(node.name):
                    issues.append(
                        f"Function '{node.name}' should use snake_case naming")
            if isinstance(node, ast.ClassDef):
                if not self._is_pascal_case(node.name):
                    issues.append(
                        f"Class '{node.name}' should use PascalCase naming")
        if issues:
            self.quality_issues.append({
                'principle': 'Principle 3: Consistency',
                'issue': 'Naming convention violations',
                'details': issues})
        else:
            self.quality_strengths.append(
                "✓ Follows Python naming conventions")

    def _check_documentation(self, tree):
        """Report functions that are missing docstrings."""
        functions_without_docs = []
        total_functions = 0
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                total_functions += 1
                if not ast.get_docstring(node):
                    functions_without_docs.append(node.name)
        if functions_without_docs:
            self.quality_issues.append({
                'principle': 'Principle 5: Documentation',
                'issue': (f"{len(functions_without_docs)} function(s) "
                          "missing docstrings"),
                'details': [f"Function '{name}' has no docstring"
                            for name in functions_without_docs]})
        elif total_functions > 0:
            self.quality_strengths.append("✓ All functions have docstrings")

    def _check_function_structure(self, tree):
        """Flag functions longer than 50 lines."""
        long_functions = []
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                # end_lineno exists on Python 3.8+; guard for older ASTs.
                if hasattr(node, 'end_lineno') and hasattr(node, 'lineno'):
                    func_length = node.end_lineno - node.lineno
                    if func_length > 50:
                        long_functions.append((node.name, func_length))
        if long_functions:
            self.quality_issues.append({
                'principle': 'Principle 6: Code Structure',
                'issue': 'Functions are too long',
                'details': [f"Function '{name}' is {length} lines "
                            "(consider breaking into smaller functions)"
                            for name, length in long_functions]})

    def _check_code_smells(self, tree):
        """Flag bare ``except:`` clauses."""
        bare_excepts = []
        for node in ast.walk(tree):
            if isinstance(node, ast.ExceptHandler):
                if node.type is None:
                    bare_excepts.append(node.lineno)
        if bare_excepts:
            self.quality_issues.append({
                'principle': 'Principle 7: Error Handling',
                'issue': 'Bare except clauses found',
                'details': [f"Line {line}: Use specific exception types "
                            "instead of bare 'except:'"
                            for line in bare_excepts]})

    def _is_snake_case(self, name):
        """True when *name* is snake_case (contains no uppercase letters).

        Fixes the previous check (``islower() or '_' in name``), which
        wrongly accepted mixed-case names such as ``Bad_Name`` merely
        because they contained an underscore.
        """
        return not any(c.isupper() for c in name)

    def _is_pascal_case(self, name):
        """True when *name* starts uppercase and contains no underscores."""
        # Guard against an empty name (previously indexed name[0] blindly).
        return bool(name) and name[0].isupper() and '_' not in name

    def _calculate_grade(self):
        """Map the number of quality issues to a letter grade A-D."""
        issue_count = len(self.quality_issues)
        if issue_count == 0:
            return 'A'
        elif issue_count <= 2:
            return 'B'
        elif issue_count <= 4:
            return 'C'
        else:
            return 'D'


# --- LearningPathGenerator Class ---
class LearningPathGenerator:
    """
    Generates personalized learning resources based on code evaluation.
    """

    def __init__(self):
        # Per-error-type explanations, common causes, and reading links,
        # keyed by the exception class name reported by CodeEvaluator.
        self.error_resources = {
            'SyntaxError': {
                'explanation': 'Python could not understand your code structure.',
                'common_causes': ['Missing or mismatched parentheses, brackets, or quotes'],
                'resources': ['https://realpython.com/invalid-syntax-python/']},
            'NameError': {
                'explanation': 'You tried to use a variable or function that does not exist.',
                'common_causes': ['Typo in variable name'],
                'resources': ['https://realpython.com/python-nameerror/']},
            'TypeError': {
                'explanation': 'An operation was performed on an incompatible data type.',
                'common_causes': ['Trying to add strings and numbers'],
                'resources': ['https://realpython.com/python-traceback/#typeerror']},
            'IndexError': {
                'explanation': 'You tried to access a list index that does not exist.',
                'common_causes': ['List is shorter than expected'],
                'resources': ['https://realpython.com/python-indexerror/']},
            'KeyError': {
                'explanation': 'You tried to access a dictionary key that does not exist.',
                'common_causes': ['Typo in key name'],
                'resources': ['https://realpython.com/python-keyerror/']},
            'ZeroDivisionError': {
                'explanation': 'You tried to divide by zero.',
                'common_causes': ['Variable is zero when it should not be'],
                'resources': ['https://realpython.com/python-traceback/#zerodivisionerror']},
        }
        # Tips and links keyed by the exact principle labels that
        # CodeEvaluator attaches to quality issues.
        self.principle_resources = {
            'Principle 1: Readability': {
                'tips': ['Use descriptive variable names'],
                'resources': ['https://peps.python.org/pep-0008/#code-lay-out']},
            'Principle 3: Consistency': {
                'tips': ['Functions and variables: snake_case'],
                'resources': ['https://peps.python.org/pep-0008/#naming-conventions']},
            'Principle 4: DRY': {
                'tips': ['Extract repeated code into functions'],
                'resources': ['https://realpython.com/python-functions/']},
            'Principle 5: Documentation': {
                'tips': ['Write docstrings for all functions'],
                'resources': ['https://peps.python.org/pep-0257/']},
            'Principle 6: Code Structure': {
                'tips': ['Keep functions short and focused'],
                'resources': ['https://realpython.com/python-refactoring/']},
            'Principle 7: Error Handling': {
                'tips': ['Use try/except for operations that might fail'],
                'resources': ['https://realpython.com/python-exceptions/']},
        }

    def generate_learning_path(self, correctness_result, quality_result):
        """Build and return the formatted learning-path report string.

        Branches on three outcomes: code failed (error-focused path),
        code passed with quality issues (improvement path), or code
        passed cleanly (congratulations + next steps).
        """
        path = []
        path.append("\n" + "=" * 70)
        path.append("📚 YOUR PERSONALIZED LEARNING PATH")
        path.append("=" * 70 + "\n")

        if not correctness_result['passed']:
            # Correctness failed: focus the learner on the error first.
            path.append("🔴 PRIORITY: FIX CORRECTNESS ISSUES FIRST\n")
            error_type = correctness_result.get('error_type', 'Unknown')
            path.append(f"❌ Error Type: {error_type}")
            path.append(f"   {correctness_result['error']}\n")
            if error_type in self.error_resources:
                resource = self.error_resources[error_type]
                path.append(f"💡 What this means:\n   {resource['explanation']}\n")
                path.append("📖 Learn more:\n   • "
                            + resource['resources'][0] + "\n")
            path.append("✏️ Next Steps:\n   1. Read the error message carefully"
                        "\n   2. Fix the issue\n   3. Run evaluation again")

        elif quality_result['evaluated'] and quality_result['issues']:
            # Code runs but has quality issues: itemize improvements.
            path.append("✅ GREAT NEWS: Your code runs successfully!\n")
            path.append(f"📊 Quality Grade: {quality_result['grade']}\n")
            if quality_result['strengths']:
                path.append("💪 Strengths in your code:")
                for strength in quality_result['strengths']:
                    path.append(f"   {strength}")
                path.append("")
            path.append("💡 AREAS FOR IMPROVEMENT:\n")
            for i, issue in enumerate(quality_result['issues'], 1):
                path.append(f"{i}. {issue['principle']}")
                path.append(f"   Issue: {issue['issue']}")
                if 'details' in issue:
                    path.append("   Details:")
                    for detail in issue['details'][:3]:  # cap detail spam
                        path.append(f"     • {detail}")
                principle_key = issue['principle']
                if principle_key in self.principle_resources:
                    resource = self.principle_resources[principle_key]
                    path.append("   💡 Tips:")
                    for tip in resource['tips'][:2]:
                        path.append(f"     • {tip}")
                    path.append(f"   📖 Learn more: {resource['resources'][0]}")
                path.append("")
            if quality_result.get('pep8_issues'):
                path.append("📏 PEP 8 Style Issues:")
                for issue in quality_result['pep8_issues'][:5]:
                    path.append(f"   • {issue}")
                if len(quality_result['pep8_issues']) > 5:
                    remaining = len(quality_result['pep8_issues']) - 5
                    path.append(f"   ... and {remaining} more")

        elif quality_result['evaluated']:
            # Code runs with no quality issues: congratulate and suggest
            # next learning topics.
            path.append("🎉 EXCELLENT WORK!\n")
            path.append(f"📊 Quality Grade: {quality_result['grade']}\n")
            path.append("Your code runs successfully and follows best practices!\n")
            if quality_result['strengths']:
                path.append("✨ Strengths:")
                for strength in quality_result['strengths']:
                    path.append(f"   {strength}")
                path.append("")
            path.append("🚀 Ready for More?\n")
            path.append("   Next steps to improve your Python skills:")
            path.append("   • Learn about list comprehensions")
            path.append("   • Explore context managers (with statements)")
            path.append("   • Study decorators and generators")
            path.append("   • Practice writing unit tests")

        path.append("=" * 70)
        return "\n".join(path)


# --- Main Evaluation Function for Gradio Interface ---
def evaluate_code_for_gradio(code_to_evaluate):
    """
    Main evaluation function that returns formatted results for Gradio.

    Runs the two evaluation phases, then appends the generated learning
    path. Any unexpected internal failure is reported as a user-facing
    message rather than propagating into the Gradio stack.
    """
    # Add a progress indicator
    status_message = "🔍 Evaluating your code...\n\n"
    try:
        evaluator = CodeEvaluator()
        learning_gen = LearningPathGenerator()

        # Phase 1: Correctness Evaluation
        status_message += "⚙️ Phase 1: Testing if code runs...\n"
        correctness_result = evaluator.evaluate_correctness(code_to_evaluate)

        # Phase 2: Quality Evaluation (only if correctness passed)
        quality_result = {'evaluated': False}
        if correctness_result['passed']:
            status_message += "✅ Code runs successfully!\n"
            status_message += "📊 Phase 2: Evaluating code quality...\n"
            quality_result = evaluator.evaluate_quality()
            status_message += f"✅ Quality Grade: {quality_result['grade']}\n\n"
        else:
            status_message += (
                f"❌ Code failed: "
                f"{correctness_result.get('error_type', 'Error')}\n\n")

        # Phase 3: Generate Learning Path
        learning_path = learning_gen.generate_learning_path(
            correctness_result, quality_result)

        # Combine status and learning path
        return status_message + learning_path
    except Exception as e:
        # If something goes wrong, return error message
        return (f"❌ An error occurred during evaluation:\n\n{str(e)}\n\n"
                "Please try again or report this issue.")


# --- Gradio Interface Setup ---
with gr.Blocks(title="🎓 AICELS: Code Quality Learning App") as demo:
    gr.Markdown("""
    # 🎓 AICELS: AI-Powered Code Evaluation and Learning System

    **Correctness First, Quality Second**

    This tool evaluates Python code in two phases:
    1. **Correctness**: Does your code run without errors?
    2. **Quality**: How well does it follow best practices?

    Paste your Python code below and get personalized feedback!
    """)

    with gr.Row():
        with gr.Column():
            code_input = gr.Code(
                label="Paste your Python code here:",
                language="python",
                lines=20
            )
            submit_btn = gr.Button("🚀 Evaluate Code", variant="primary")
        with gr.Column():
            output = gr.Textbox(
                label="Evaluation Results & Learning Path",
                lines=30,
                max_lines=50
            )

    gr.Markdown("""
    ### Example Code to Try:
    """)
    gr.Examples(
        examples=[
            ["def hello():\n    print('Hello World')\n\nhello()"],
            ["def calculate_sum(a, b):\n    \"\"\"Add two numbers.\"\"\"\n    return a + b\n\nresult = calculate_sum(5, 3)\nprint(f\"Result: {result}\")"],
            ["def calc(p,r,t):\n    return p*(1+r)**t\n\nresult = calc(1000,0.05,10)\nprint(result)"],
        ],
        inputs=code_input,
        label="Click an example to load it"
    )

    submit_btn.click(
        fn=evaluate_code_for_gradio,
        inputs=code_input,
        outputs=output
    )


if __name__ == '__main__':
    demo.launch()