Spaces:
Sleeping
Sleeping
File size: 9,477 Bytes
2358888 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 | """
Mock vs Real comparison utility.
Compares mock responses against real API responses to validate accuracy.
"""
from typing import Dict, Any, List
import json
from difflib import unified_diff
class MockRealComparator:
    """Compare mock and real API responses.

    Recursively walks both payloads to produce a similarity score in
    [0.0, 1.0], a flat list of concrete difference records, actionable
    recommendations, and a human-readable summary/report.
    """

    def __init__(self):
        # Kept for backward compatibility with any caller that inspects it;
        # compare() returns its differences rather than mutating this list.
        self.differences = []

    def compare(self, mock_response: Dict, real_response: Dict) -> Dict:
        """
        Compare mock and real responses.

        Args:
            mock_response: Response from mock mode.
            real_response: Response from real API.

        Returns:
            Comparison result dict with keys:
                similarity_score: float in [0.0, 1.0].
                differences: list of difference records.
                recommendations: list of human-readable suggestions.
                mock_accurate: True when similarity_score > 0.8.
                summary: one-line human-readable verdict.
        """
        # Both payloads are expected to wrap their content under 'data';
        # a missing key degrades gracefully to an empty dict.
        mock_data = mock_response.get('data', {})
        real_data = real_response.get('data', {})

        similarity = self._calculate_similarity(mock_data, real_data)
        differences = self._find_differences(mock_data, real_data)
        recommendations = self._generate_recommendations(differences)

        return {
            'similarity_score': similarity,
            'differences': differences,
            'recommendations': recommendations,
            'mock_accurate': similarity > 0.8,
            'summary': self._generate_summary(similarity, differences),
        }

    def _calculate_similarity(self, mock: Any, real: Any, path: str = '') -> float:
        """
        Calculate similarity score between mock and real data.

        Dicts score as the average of key overlap and the mean value
        similarity over common keys; lists compare element-wise (0.5 flat
        when lengths differ); primitives score 1.0 on equality, else 0.0.
        Returns score from 0.0 to 1.0.
        """
        # Strict type check: e.g. 1 vs '1' is a complete mismatch.
        if type(mock) is not type(real):
            return 0.0

        if isinstance(mock, dict):
            if not mock and not real:
                return 1.0  # Two empty dicts are a perfect match.

            mock_keys = set(mock)
            real_keys = set(real)
            common_keys = mock_keys & real_keys
            all_keys = mock_keys | real_keys
            if not all_keys:
                return 1.0

            # Structural similarity: fraction of shared keys.
            key_similarity = len(common_keys) / len(all_keys)

            # Content similarity: recurse into values both sides share.
            value_similarities = [
                self._calculate_similarity(mock[key], real[key], f"{path}.{key}")
                for key in common_keys
            ]
            if value_similarities:
                avg_value_similarity = sum(value_similarities) / len(value_similarities)
                return (key_similarity + avg_value_similarity) / 2
            return key_similarity

        if isinstance(mock, list):
            if not mock and not real:
                return 1.0
            if len(mock) != len(real):
                return 0.5  # Partial credit for a list length difference.

            similarities = [
                self._calculate_similarity(m, r, f"{path}[{i}]")
                for i, (m, r) in enumerate(zip(mock, real))
            ]
            return sum(similarities) / len(similarities) if similarities else 0.0

        # Primitive types: exact equality or nothing.
        return 1.0 if mock == real else 0.0

    def _find_differences(self, mock: Any, real: Any, path: str = '') -> List[Dict]:
        """Find specific differences between mock and real.

        Each record carries 'path', 'type', a 'severity' of
        high/medium/low, and the offending value(s).
        """
        differences = []

        if type(mock) is not type(real):
            differences.append({
                'path': path,
                'type': 'type_mismatch',
                'mock_type': type(mock).__name__,
                'real_type': type(real).__name__,
                'severity': 'high',
            })
            # No point diffing contents of mismatched types.
            return differences

        if isinstance(mock, dict):
            mock_keys = set(mock)
            real_keys = set(real)

            # Keys the real API returns that the mock omits.
            for key in real_keys - mock_keys:
                differences.append({
                    'path': f"{path}.{key}" if path else key,
                    'type': 'missing_in_mock',
                    'real_value': real[key],
                    'severity': 'medium',
                })

            # Keys the mock invents that the real API never returns.
            for key in mock_keys - real_keys:
                differences.append({
                    'path': f"{path}.{key}" if path else key,
                    'type': 'extra_in_mock',
                    'mock_value': mock[key],
                    'severity': 'low',
                })

            # Recurse into keys both sides share.
            for key in mock_keys & real_keys:
                new_path = f"{path}.{key}" if path else key
                differences.extend(self._find_differences(mock[key], real[key], new_path))

        elif isinstance(mock, list):
            if len(mock) != len(real):
                differences.append({
                    'path': path,
                    'type': 'length_mismatch',
                    'mock_length': len(mock),
                    'real_length': len(real),
                    'severity': 'medium',
                })
            # Compare the overlapping prefix element-by-element.
            for i in range(min(len(mock), len(real))):
                new_path = f"{path}[{i}]"
                differences.extend(self._find_differences(mock[i], real[i], new_path))

        else:
            # Primitive values.
            if mock != real:
                differences.append({
                    'path': path,
                    'type': 'value_mismatch',
                    'mock_value': mock,
                    'real_value': real,
                    'severity': 'low',
                })

        return differences

    def _generate_recommendations(self, differences: List[Dict]) -> List[str]:
        """Generate recommendations based on differences."""
        recommendations = []

        # Bucket by severity to prioritize the advice.
        high_severity = [d for d in differences if d.get('severity') == 'high']
        medium_severity = [d for d in differences if d.get('severity') == 'medium']

        if high_severity:
            recommendations.append("⚠️ High priority: Fix type mismatches in mock response")

        if medium_severity:
            missing = [d for d in medium_severity if d['type'] == 'missing_in_mock']
            if missing:
                # Show at most three paths to keep the message readable.
                paths = [d['path'] for d in missing[:3]]
                recommendations.append(f"🔧 Add missing fields to mock: {', '.join(paths)}")

        if not differences:
            recommendations.append("✅ Mock response matches real API perfectly!")
        elif len(differences) < 5:
            recommendations.append("✨ Mock is very accurate with minor differences")
        else:
            recommendations.append("📝 Consider updating mock to better match real API")

        return recommendations

    def _generate_summary(self, similarity: float, differences: List[Dict]) -> str:
        """Generate human-readable summary."""
        if similarity >= 0.95:
            return f"Excellent match ({similarity:.1%} similar) - Mock is highly accurate"
        elif similarity >= 0.80:
            return f"Good match ({similarity:.1%} similar) - {len(differences)} minor differences"
        elif similarity >= 0.60:
            return f"Fair match ({similarity:.1%} similar) - {len(differences)} differences found"
        else:
            return f"Poor match ({similarity:.1%} similar) - Significant differences detected"

    def generate_report(self, comparison: Dict) -> str:
        """Generate a detailed comparison report.

        Args:
            comparison: The dict returned by compare().

        Returns:
            Multi-line report text; at most the first 10 differences are
            listed individually, the rest summarized as a count.
        """
        lines = []
        lines.append("=" * 70)
        lines.append("MOCK vs REAL API COMPARISON REPORT")
        lines.append("=" * 70)
        lines.append("")
        lines.append(f"📊 Similarity Score: {comparison['similarity_score']:.1%}")
        lines.append(f"📋 Summary: {comparison['summary']}")
        lines.append("")

        if comparison['differences']:
            lines.append(f"🔍 Found {len(comparison['differences'])} differences:")
            lines.append("")
            for i, diff in enumerate(comparison['differences'][:10], 1):
                lines.append(f"{i}. {diff['path']}")
                lines.append(f"   Type: {diff['type']}")
                if 'mock_value' in diff and 'real_value' in diff:
                    lines.append(f"   Mock: {diff['mock_value']}")
                    lines.append(f"   Real: {diff['real_value']}")
                lines.append("")
            if len(comparison['differences']) > 10:
                lines.append(f"... and {len(comparison['differences']) - 10} more differences")
                lines.append("")
        else:
            lines.append("✅ No differences found - perfect match!")
            lines.append("")

        if comparison['recommendations']:
            lines.append("💡 Recommendations:")
            for rec in comparison['recommendations']:
                lines.append(f"   {rec}")
            lines.append("")

        lines.append("=" * 70)
        return "\n".join(lines)
|