# ui-regression-testing-3 / agents/agent_3_visual_comparator.py
"""
Agent 3: Visual Comparator
Compares Figma and Website screenshots, generates diff overlays.
"""
from typing import Dict, Any
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.image_differ import ImageDiffer
def _convert_to_native(obj: Any) -> Any:
    """Recursively convert numpy types to JSON-serializable Python natives.

    Handles numpy scalars (floating/integer -> float/int), ndarrays
    (-> nested lists), and walks dicts and lists; any other value is
    returned unchanged.
    """
    # Import once per top-level call; the previous version re-ran the
    # import statement on every recursive step. numpy stays a
    # function-local dependency of the serialization cleanup only.
    import numpy as np

    def _walk(o: Any) -> Any:
        if isinstance(o, np.floating):
            return float(o)
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.ndarray):
            return o.tolist()
        if isinstance(o, dict):
            return {k: _walk(v) for k, v in o.items()}
        if isinstance(o, list):
            return [_walk(i) for i in o]
        return o

    return _walk(obj)


def agent_3_node(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Compare screenshots and generate visual diff report.

    This agent:
    1. Normalizes image sizes (handles Figma 2x)
    2. Calculates similarity scores
    3. Creates side-by-side comparison with diff overlay
    4. Detects specific differences (color, layout, structure)

    Args:
        state: Pipeline state dict. Reads "figma_screenshots",
            "website_screenshots", "figma_dimensions",
            "website_dimensions", "execution_id", and "logs".

    Returns:
        Partial state update. On success: "comparison_images",
        "visual_differences", "similarity_scores", "overall_score",
        status "analysis_complete", and the accumulated "logs". On
        failure: status "comparison_failed" plus an "error_message".
    """
    print("\n" + "="*60)
    print("🔍 Agent 3: Visual Comparator - Analyzing Differences")
    print("="*60)

    figma_screenshots = state.get("figma_screenshots", {})
    website_screenshots = state.get("website_screenshots", {})
    figma_dims = state.get("figma_dimensions", {})
    website_dims = state.get("website_dimensions", {})
    execution_id = state.get("execution_id", "")
    # Copy so we never mutate the caller's state in place (the original
    # appended directly to the shared list on the success path).
    logs = list(state.get("logs", []))

    # Guard: both sides of the comparison must exist before we try to diff.
    if not figma_screenshots or not website_screenshots:
        error_msg = "Missing screenshots for comparison"
        print(f"\n ❌ {error_msg}")
        return {
            "status": "comparison_failed",
            "error_message": error_msg,
            "logs": logs + [f"❌ {error_msg}"]
        }

    try:
        # Initialize differ; comparison artifacts land under data/comparisons.
        differ = ImageDiffer(output_dir="data/comparisons")

        # Run comparison across every viewport present in both inputs.
        results = differ.compare_all_viewports(
            figma_screenshots=figma_screenshots,
            website_screenshots=website_screenshots,
            figma_dims=figma_dims,
            website_dims=website_dims,
            execution_id=execution_id
        )

        # One side-by-side comparison image per viewport.
        comparison_images = {
            viewport: comparison["comparison_image"]
            for viewport, comparison in results["comparisons"].items()
        }

        # Log results
        print("\n" + "="*60)
        print("📊 COMPARISON RESULTS")
        print("="*60)
        print(f" Overall Similarity: {results['overall_score']:.1f}%")

        for viewport, score in results["viewport_scores"].items():
            # >=90% passes, 70-89% warns, below 70% fails.
            status_emoji = "✅" if score >= 90 else "⚠️" if score >= 70 else "❌"
            print(f" {status_emoji} {viewport.capitalize()}: {score:.1f}%")
            logs.append(f"{status_emoji} {viewport} similarity: {score:.1f}%")

        if results["all_differences"]:
            print(f"\n 🔍 Found {len(results['all_differences'])} differences:")
            for diff in results["all_differences"]:
                severity_emoji = "🔴" if diff["severity"] == "High" else "🟡" if diff["severity"] == "Medium" else "🟢"
                print(f" {severity_emoji} [{diff['category']}] {diff['title']}")
                logs.append(f"{severity_emoji} {diff['title']}")
        else:
            print("\n ✅ No significant differences detected!")
            logs.append("✅ No significant differences detected")

        # Convert numpy types to plain Python so the state serializes cleanly.
        similarity_scores_native = {k: float(v) for k, v in results["viewport_scores"].items()}
        overall_score_native = float(results["overall_score"])
        differences_native = _convert_to_native(results["all_differences"])

        return {
            "comparison_images": comparison_images,
            "visual_differences": differences_native,
            "similarity_scores": similarity_scores_native,
            "overall_score": overall_score_native,
            "status": "analysis_complete",
            "logs": logs
        }
    except Exception as e:
        import traceback
        error_msg = f"Comparison failed: {str(e)}"
        print(f"\n ❌ {error_msg}")
        traceback.print_exc()
        return {
            "status": "comparison_failed",
            "error_message": error_msg,
            "logs": logs + [f"❌ {error_msg}"]
        }