Spaces:
Sleeping
Sleeping
| """Tests for MCP context and response validation.""" | |
| import json | |
| import pytest | |
| from pathlib import Path | |
| from jsonschema import validate, ValidationError | |
| from schema import MCP_CONTEXT_SCHEMA, MCP_PREDICT_RESPONSE_SCHEMA | |
| from utils import format_results_to_mcp, get_mcp_context | |
def test_mcp_context_schema():
    """Validate the MCP context against its JSON schema and spot-check key fields."""
    context = get_mcp_context()
    validate(instance=context, schema=MCP_CONTEXT_SCHEMA)

    # App metadata must be present with its identifying fields.
    assert "app" in context
    for field in ("id", "name", "version"):
        assert field in context["app"]

    # At least one window, and the first window's view exposes objects.
    assert "windows" in context
    windows = context["windows"]
    assert len(windows) > 0
    assert "objects" in windows[0]["view"]

    # File-typed objects: at least two expected (ligand and protein).
    assert "objects" in context
    file_objects = [o for o in context["objects"] if o["type"] == "file"]
    assert len(file_objects) >= 2

    # Every declared action must specify its parameters and output.
    assert "actions" in context
    actions = context["actions"]
    assert len(actions) >= 1
    for action in actions:
        assert "parameters" in action
        assert "output" in action
def test_mcp_response_format():
    """Check that format_results_to_mcp emits schema-valid MCP responses."""
    # Successful run: one molecule with mixed pass/fail results.
    ok_response = format_results_to_mcp(
        "molecule,test1,test2\nmol1,True,False\n", "", "mol1"
    )
    validate(instance=ok_response, schema=MCP_PREDICT_RESPONSE_SCHEMA)
    assert ok_response["object_id"] == "validation_results"
    data = ok_response["data"]
    assert "columns" in data
    assert "rows" in data
    assert len(data["rows"]) == 1

    # Error path: no CSV plus an error message yields a failing status marker.
    err_response = format_results_to_mcp("", "Error occurred", "mol1")
    validate(instance=err_response, schema=MCP_PREDICT_RESPONSE_SCHEMA)
    assert err_response["data"]["rows"][0][1] == "❌"  # Status should be fail
def test_mcp_response_content():
    """Verify status, pass-count, and details columns for pass/fail scenarios."""
    # Scenario 1: every test passes.
    all_pass = format_results_to_mcp(
        "molecule,test1,test2\nmol1,True,True\n", "", "mol1"
    )
    first_row = all_pass["data"]["rows"][0]
    assert first_row[1] == "✅"                 # status marker
    assert first_row[2] == "2/2"                # passed/total
    assert first_row[3] == "All tests passed"   # details text

    # Scenario 2: partial failure — failed test names show up in details.
    partial = format_results_to_mcp(
        "molecule,test1,test2,test3\nmol1,True,False,False\n", "", "mol1"
    )
    first_row = partial["data"]["rows"][0]
    assert first_row[1] == "❌"                 # status marker
    assert first_row[2] == "1/3"                # passed/total
    assert "test2" in first_row[3]
    assert "test3" in first_row[3]
def test_invalid_mcp_context():
    """An app entry missing required fields must fail schema validation."""
    bad_context = {"app": {"name": "Test"}}  # lacks other required app fields
    with pytest.raises(ValidationError):
        validate(instance=bad_context, schema=MCP_CONTEXT_SCHEMA)
def _check_row_lengths(data):
    """Raise ValidationError when any row's length differs from the column count.

    Row/column consistency is a cross-field constraint enforced here by hand;
    the helper mirrors the error style of jsonschema so callers can treat both
    failure modes uniformly.
    """
    columns_count = len(data["columns"])
    for row in data["rows"]:
        if len(row) != columns_count:
            raise ValidationError(
                f"Row length {len(row)} does not match columns length {columns_count}"
            )


def test_invalid_mcp_response():
    """A response whose rows don't match the column count must be rejected.

    NOTE(review): the original version called ``validate(...)`` after the
    manual row-length raise inside the same ``pytest.raises`` block, so the
    schema call was unreachable dead code; it has been removed and the manual
    consistency check is now the explicit subject of the test.
    """
    invalid_response = {
        "object_id": "validation_results",
        "data": {
            "columns": ["col1"],
            "rows": [["too", "many", "columns"]],  # 3 cells vs 1 declared column
        },
    }
    with pytest.raises(ValidationError) as exc_info:
        _check_row_lengths(invalid_response["data"])
    # The row-length error (not some unrelated failure) must be what fired.
    assert "Row length" in str(exc_info.value)