Spaces:
Running
Running
| """Tests for task bank integrity.""" | |
| import sys | |
| from pathlib import Path | |
| sys.path.insert(0, str(Path(__file__).resolve().parents[1])) | |
| from task_bank import ( | |
| ALL_TASKS, | |
| ALGORITHMS, | |
| EASY_TASKS, | |
| HARD_TASKS, | |
| MATH_TOPICS, | |
| MEDIUM_TASKS, | |
| ML_CONCEPTS, | |
| STATISTICS_TASKS, | |
| Task, | |
| ) | |
def test_task_counts():
    """Each task category meets its minimum size, and ALL_TASKS is exactly their union."""
    n_ml = len(ML_CONCEPTS)
    n_math = len(MATH_TOPICS)
    n_algo = len(ALGORITHMS)
    n_stats = len(STATISTICS_TASKS)
    assert n_ml >= 5, f"ML_CONCEPTS has {n_ml} (need >=5)"
    assert n_math >= 5, f"MATH_TOPICS has {n_math} (need >=5)"
    assert n_algo >= 3, f"ALGORITHMS has {n_algo} (need >=3)"
    assert n_stats >= 2, f"STATISTICS_TASKS has {n_stats} (need >=2)"
    # Union check: no task should be dropped or double-counted across categories.
    assert len(ALL_TASKS) == n_ml + n_math + n_algo + n_stats
def test_difficulty_partition():
    """The easy/medium/hard buckets are each non-empty and together partition ALL_TASKS."""
    buckets = (EASY_TASKS, MEDIUM_TASKS, HARD_TASKS)
    # Sizes must sum to the total: every task lands in exactly one bucket.
    assert sum(len(bucket) for bucket in buckets) == len(ALL_TASKS)
    for bucket in buckets:
        assert len(bucket) > 0
def test_task_fields():
    """Every entry in ALL_TASKS is a well-formed Task with valid field values."""
    # Allowed values, hoisted out of the loop for readability.
    valid_tiers = ("beginner", "intermediate", "advanced")
    valid_formats = ("marimo", "manim", None)
    valid_difficulties = ("easy", "medium", "hard")
    for task in ALL_TASKS:
        assert isinstance(task, Task)
        assert task.topic, f"empty topic: {task}"
        assert task.content, f"empty content: {task}"
        assert task.tier in valid_tiers, f"bad tier: {task.tier}"
        assert task.keywords, f"empty keywords: {task.topic}"
        assert task.preferred_format in valid_formats, f"bad format: {task.preferred_format}"
        assert task.difficulty in valid_difficulties, f"bad difficulty: {task.difficulty}"
def test_both_formats_present():
    """The bank contains at least one task for each supported output format."""
    seen = set()
    for task in ALL_TASKS:
        seen.add(task.preferred_format)
    assert "marimo" in seen, "no marimo tasks"
    assert "manim" in seen, "no manim tasks"
if __name__ == "__main__":
    # Allow running the checks directly as a script, without pytest.
    for check in (
        test_task_counts,
        test_difficulty_partition,
        test_task_fields,
        test_both_formats_present,
    ):
        check()
    print("PASS: test_task_bank (4/4)")