Spaces:
Sleeping
Sleeping
firepenguindisopanda
Refactor documentation and role structures; update auth and web routers
4da2f57 | """ | |
| Tests for schema validation. | |
| """ | |
| import pytest | |
| from pydantic import ValidationError | |
| from app.core.schemas import ( | |
| TeamRole, | |
| ProjectRequest, | |
| AgentResponse, | |
| JudgeOutput, | |
| JudgeIssue, | |
| ) | |
class TestTeamRole:
    """Tests for the TeamRole enumeration."""

    def test_all_roles_have_values(self):
        """Every member must carry a non-empty string value."""
        assert all(
            isinstance(member.value, str) and len(member.value) > 0
            for member in TeamRole
        )

    def test_role_values_are_lowercase(self):
        """Member values must already be in lowercase form."""
        for member in TeamRole:
            value = member.value
            assert value == value.lower()

    def test_expected_roles_exist(self):
        """The core team roles must all be present in the enum."""
        present = {member.value for member in TeamRole}
        for expected_role in (
            "product_owner",
            "business_analyst",
            "solution_architect",
            "qa_strategist",
            "ux_designer",
            "technical_writer",
        ):
            assert expected_role in present, f"Missing role: {expected_role}"
class TestProjectRequest:
    """Tests for the ProjectRequest schema."""

    def test_valid_request(self):
        """A request with only a description should validate."""
        req = ProjectRequest(description="Build a todo app with user authentication")
        assert "todo" in req.description.lower()

    def test_request_with_all_fields(self):
        """A request supplying every optional field should validate."""
        req = ProjectRequest(
            description="Full stack app",
            frontend_framework="React",
            backend_framework="FastAPI",
            database="PostgreSQL",
            auth_service="Auth0",
            payment_gateway="Stripe",
            include_docker=True,
            include_cicd=True,
        )
        assert req.frontend_framework == "React"
        assert req.include_docker is True

    def test_empty_description_allowed(self):
        """The schema must accept an empty description string."""
        assert ProjectRequest(description="").description == ""

    def test_serialization(self):
        """model_dump must yield a dict mirroring the field values."""
        req = ProjectRequest(description="Test project", frontend_framework="Vue")
        dumped = req.model_dump()
        assert isinstance(dumped, dict)
        assert dumped["description"] == "Test project"
        assert dumped["frontend_framework"] == "Vue"
class TestAgentResponse:
    """Tests for the AgentResponse schema."""

    def test_valid_response(self):
        """A role plus content should be enough to validate."""
        resp = AgentResponse(
            role=TeamRole.SOLUTION_ARCHITECT,
            content="## Implementation\n\nCode here...",
        )
        assert resp.role == TeamRole.SOLUTION_ARCHITECT
        assert "Implementation" in resp.content

    def test_response_with_metadata(self):
        """Arbitrary metadata must round-trip through the model."""
        resp = AgentResponse(
            role=TeamRole.BUSINESS_ANALYST,
            content="Analysis results",
            metadata={"tokens": 500, "model": "llama"},
        )
        assert resp.metadata["tokens"] == 500

    def test_response_serialization(self):
        """model_dump must produce a dict containing the content field."""
        resp = AgentResponse(
            role=TeamRole.BUSINESS_ANALYST,
            content="Analysis results",
        )
        dumped = resp.model_dump()
        assert isinstance(dumped, dict)
        assert "content" in dumped
class TestJudgeIssue:
    """Tests for the JudgeIssue schema."""

    def test_valid_issue(self):
        """A fully-specified issue should validate and expose its fields."""
        created = JudgeIssue(
            id="FR-001",
            type="incomplete",
            severity="high",
            suggestion="Add more detail",
        )
        assert created.id == "FR-001"
        assert created.severity == "high"
class TestJudgeOutput:
    """Tests for the JudgeOutput schema."""

    def test_valid_approved_output(self):
        """An approved verdict with a high score should validate."""
        verdict = JudgeOutput(
            is_approved=True,
            score=9,
            issues=[],
            recommended_action="accept",
            feedback="Excellent work!",
            reasoning="All requirements met.",
        )
        assert verdict.is_approved is True
        assert verdict.score == 9

    def test_valid_rejected_output(self):
        """A rejected verdict carrying issues should validate."""
        flagged = JudgeIssue(
            id="NFR-002",
            type="missing_field",
            severity="medium",
            suggestion="Include performance requirements",
        )
        verdict = JudgeOutput(
            is_approved=False,
            score=4,
            issues=[flagged],
            recommended_action="retry",
            feedback="Needs improvement",
            reasoning="Missing key requirements.",
        )
        assert verdict.is_approved is False
        assert len(verdict.issues) == 1
        assert verdict.recommended_action == "retry"

    def test_score_range(self):
        """A mid-range score must land inside the 1-10 window."""
        verdict = JudgeOutput(
            is_approved=True,
            score=5,
            issues=[],
            recommended_action="accept",
            feedback="OK",
            reasoning="Acceptable quality.",
        )
        assert 1 <= verdict.score <= 10

    def test_score_below_range_fails(self):
        """A score of 0 must be rejected by validation."""
        with pytest.raises(ValidationError):
            JudgeOutput(
                is_approved=False,
                score=0,
                issues=[],
                recommended_action="retry",
                feedback="Bad",
                reasoning="Score too low.",
            )

    def test_score_above_range_fails(self):
        """A score of 11 must be rejected by validation."""
        with pytest.raises(ValidationError):
            JudgeOutput(
                is_approved=True,
                score=11,
                issues=[],
                recommended_action="accept",
                feedback="Perfect",
                reasoning="Score too high.",
            )

    def test_serialization(self):
        """model_dump must produce a dict mirroring the field values."""
        verdict = JudgeOutput(
            is_approved=True,
            score=8,
            issues=[],
            recommended_action="accept",
            feedback="Good",
            reasoning="Meets standards.",
        )
        dumped = verdict.model_dump()
        assert isinstance(dumped, dict)
        assert dumped["is_approved"] is True
        assert dumped["score"] == 8