Spaces:
Sleeping
Sleeping
| """ | |
| Mock vs Real comparison utility. | |
| Compares mock responses against real API responses to validate accuracy. | |
| """ | |
| from typing import Dict, Any, List | |
| import json | |
| from difflib import unified_diff | |
class MockRealComparator:
    """Compare mock and real API responses to validate mock accuracy.

    Produces a similarity score in [0.0, 1.0], a structured list of
    differences, recommendations, and a human-readable summary/report.
    """

    def __init__(self):
        # NOTE(review): kept for backward compatibility with any external
        # reader, but compare() returns differences directly and never
        # writes this attribute.
        self.differences = []

    def compare(self, mock_response: Dict, real_response: Dict) -> Dict:
        """
        Compare mock and real responses.

        Args:
            mock_response: Response from mock mode.
            real_response: Response from real API.

        Returns:
            Comparison result dict with keys 'similarity_score',
            'differences', 'recommendations', 'mock_accurate'
            (True only when score is strictly > 0.8), and 'summary'.
        """
        # Only the payload under 'data' is compared; envelope fields
        # outside 'data' are deliberately ignored. Missing 'data' is
        # treated as an empty payload.
        mock_data = mock_response.get('data', {})
        real_data = real_response.get('data', {})

        similarity = self._calculate_similarity(mock_data, real_data)
        differences = self._find_differences(mock_data, real_data)
        recommendations = self._generate_recommendations(differences)

        return {
            'similarity_score': similarity,
            'differences': differences,
            'recommendations': recommendations,
            'mock_accurate': similarity > 0.8,
            'summary': self._generate_summary(similarity, differences)
        }

    def _calculate_similarity(self, mock: Any, real: Any, path: str = '') -> float:
        """
        Recursively calculate a similarity score between mock and real data.

        Dicts: average of key-overlap ratio and mean similarity of shared
        values. Lists: flat 0.5 on length mismatch, else mean element
        similarity. Primitives: 1.0 on equality, else 0.0.

        Returns a score from 0.0 to 1.0. ``path`` is carried for recursion
        symmetry with _find_differences but does not affect the score.
        """
        # Exact type match is required; `is not` compares type identity,
        # so e.g. int 1 vs str "1" (or int vs float) scores 0.0.
        if type(mock) is not type(real):
            return 0.0

        if isinstance(mock, dict):
            if not mock and not real:
                return 1.0  # two empty dicts are a perfect match
            mock_keys = set(mock.keys())
            real_keys = set(real.keys())

            # Jaccard-style key overlap.
            common_keys = mock_keys & real_keys
            all_keys = mock_keys | real_keys
            if not all_keys:
                return 1.0  # defensive; unreachable after the empty check
            key_similarity = len(common_keys) / len(all_keys)

            # Recurse into values present on both sides.
            value_similarities = [
                self._calculate_similarity(mock[key], real[key], f"{path}.{key}")
                for key in common_keys
            ]
            if value_similarities:
                avg_value_similarity = sum(value_similarities) / len(value_similarities)
                # Keys and values weighted equally.
                return (key_similarity + avg_value_similarity) / 2
            return key_similarity

        elif isinstance(mock, list):
            if not mock and not real:
                return 1.0
            if len(mock) != len(real):
                return 0.5  # Partial credit for list length difference
            similarities = [
                self._calculate_similarity(m, r, f"{path}[{i}]")
                for i, (m, r) in enumerate(zip(mock, real))
            ]
            return sum(similarities) / len(similarities) if similarities else 0.0

        else:
            # Primitive types: all-or-nothing equality.
            return 1.0 if mock == real else 0.0

    def _find_differences(self, mock: Any, real: Any, path: str = '') -> List[Dict]:
        """Find specific differences between mock and real.

        Returns a list of dicts, each with 'path', 'type', 'severity', and
        type-specific detail fields. Severities: type mismatch = high;
        missing key / length mismatch = medium; extra key / value = low.
        """
        differences = []

        if type(mock) is not type(real):
            differences.append({
                'path': path,
                'type': 'type_mismatch',
                'mock_type': type(mock).__name__,
                'real_type': type(real).__name__,
                'severity': 'high'
            })
            # No point descending into values of different types.
            return differences

        if isinstance(mock, dict):
            mock_keys = set(mock.keys())
            real_keys = set(real.keys())

            # Keys the real API returns that the mock omits (medium —
            # consumers of the mock would miss real fields).
            for key in real_keys - mock_keys:
                differences.append({
                    'path': f"{path}.{key}" if path else key,
                    'type': 'missing_in_mock',
                    'real_value': real[key],
                    'severity': 'medium'
                })

            # Keys only the mock has (low — harmless surplus).
            for key in mock_keys - real_keys:
                differences.append({
                    'path': f"{path}.{key}" if path else key,
                    'type': 'extra_in_mock',
                    'mock_value': mock[key],
                    'severity': 'low'
                })

            # Recurse into keys both sides share.
            for key in mock_keys & real_keys:
                new_path = f"{path}.{key}" if path else key
                differences.extend(self._find_differences(mock[key], real[key], new_path))

        elif isinstance(mock, list):
            if len(mock) != len(real):
                differences.append({
                    'path': path,
                    'type': 'length_mismatch',
                    'mock_length': len(mock),
                    'real_length': len(real),
                    'severity': 'medium'
                })
            # Compare the overlapping prefix even when lengths differ.
            for i in range(min(len(mock), len(real))):
                new_path = f"{path}[{i}]"
                differences.extend(self._find_differences(mock[i], real[i], new_path))

        else:
            # Primitive values.
            if mock != real:
                differences.append({
                    'path': path,
                    'type': 'value_mismatch',
                    'mock_value': mock,
                    'real_value': real,
                    'severity': 'low'
                })

        return differences

    def _generate_recommendations(self, differences: List[Dict]) -> List[str]:
        """Generate recommendations based on differences.

        Emits severity-driven action items first, then exactly one overall
        verdict (perfect / minor / needs update).
        """
        recommendations = []

        high_severity = [d for d in differences if d.get('severity') == 'high']
        medium_severity = [d for d in differences if d.get('severity') == 'medium']

        if high_severity:
            recommendations.append("⚠️ High priority: Fix type mismatches in mock response")

        if medium_severity:
            missing = [d for d in medium_severity if d['type'] == 'missing_in_mock']
            if missing:
                # Show at most three paths to keep the message short.
                paths = [d['path'] for d in missing[:3]]
                recommendations.append(f"🔧 Add missing fields to mock: {', '.join(paths)}")

        if not differences:
            recommendations.append("✅ Mock response matches real API perfectly!")
        elif len(differences) < 5:
            recommendations.append("✨ Mock is very accurate with minor differences")
        else:
            recommendations.append("📝 Consider updating mock to better match real API")

        return recommendations

    def _generate_summary(self, similarity: float, differences: List[Dict]) -> str:
        """Generate human-readable summary.

        Tiers: >=0.95 excellent, >=0.80 good, >=0.60 fair, else poor.
        """
        if similarity >= 0.95:
            return f"Excellent match ({similarity:.1%} similar) - Mock is highly accurate"
        elif similarity >= 0.80:
            return f"Good match ({similarity:.1%} similar) - {len(differences)} minor differences"
        elif similarity >= 0.60:
            return f"Fair match ({similarity:.1%} similar) - {len(differences)} differences found"
        else:
            return f"Poor match ({similarity:.1%} similar) - Significant differences detected"

    def generate_report(self, comparison: Dict) -> str:
        """Generate a detailed comparison report.

        Args:
            comparison: A dict as returned by compare().

        Returns:
            A multi-line plain-text report; at most 10 differences are
            listed individually, the rest are summarized by count.
        """
        lines = []
        lines.append("=" * 70)
        lines.append("MOCK vs REAL API COMPARISON REPORT")
        lines.append("=" * 70)
        lines.append("")
        lines.append(f"📊 Similarity Score: {comparison['similarity_score']:.1%}")
        lines.append(f"📋 Summary: {comparison['summary']}")
        lines.append("")

        if comparison['differences']:
            lines.append(f"🔍 Found {len(comparison['differences'])} differences:")
            lines.append("")
            for i, diff in enumerate(comparison['differences'][:10], 1):
                lines.append(f"{i}. {diff['path']}")
                lines.append(f"   Type: {diff['type']}")
                # Value pair is only present for value_mismatch entries.
                if 'mock_value' in diff and 'real_value' in diff:
                    lines.append(f"   Mock: {diff['mock_value']}")
                    lines.append(f"   Real: {diff['real_value']}")
                lines.append("")
            if len(comparison['differences']) > 10:
                lines.append(f"... and {len(comparison['differences']) - 10} more differences")
                lines.append("")
        else:
            lines.append("✅ No differences found - perfect match!")
            lines.append("")

        if comparison['recommendations']:
            lines.append("💡 Recommendations:")
            for rec in comparison['recommendations']:
                lines.append(f"   {rec}")
            lines.append("")

        lines.append("=" * 70)
        return "\n".join(lines)